import gradio as gr
import os

from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.chains import ConversationalRetrievalChain
from langchain.embeddings import HuggingFaceEmbeddings 
from langchain.llms import HuggingFacePipeline
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain.llms import HuggingFaceHub

from pathlib import Path
import chromadb

from transformers import AutoTokenizer
import transformers
import torch
import tqdm 
import accelerate

# List of available LLM models (Hugging Face repo IDs)
list_llm = ["mistralai/Mistral-7B-Instruct-v0.2"]
list_llm_simple = [os.path.basename(llm) for llm in list_llm]

def load_doc(list_file_path, chunk_size, chunk_overlap):
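    # Load each PDF and split its pages into overlapping character chunks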
    loaders = [PyPDFLoader(x) for x in list_file_path]
    pages = []
    for loader in loaders:
        pages.extend(loader.load())
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap)
    doc_splits = text_splitter.split_documents(pages)
    return doc_splits

def create_db(splits, collection_name):
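    # Embed the chunks and index them in an ephemeral (in-memory) Chroma collection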
    embedding = HuggingFaceEmbeddings()
    new_client = chromadb.EphemeralClient()
    vectordb = Chroma.from_documents(
        documents=splits,
        embedding=embedding,
        client=new_client,
        collection_name=collection_name,
    )
    return vectordb

def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
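    # Build the conversational RAG chain: hosted LLM + buffer memory + Chroma retriever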
    llm = HuggingFaceHub(
        repo_id=llm_model,
        model_kwargs={"temperature": temperature, "max_new_tokens": max_tokens, "top_k": top_k}
    )
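    # The chain returns multiple keys (answer, source_documents), so tell the
    # memory which key to store as the assistant's reply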
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        output_key='answer',
        return_messages=True
    )
    retriever = vector_db.as_retriever()
    qa_chain = ConversationalRetrievalChain.from_llm(
        llm,
        retriever=retriever,
        chain_type="stuff", 
        memory=memory,
        return_source_documents=True,
        verbose=False,
    )
    progress(0.9, desc="Done!")
    return qa_chain

def initialize_database(list_file_obj, chunk_size, chunk_overlap, llm_temperature, max_tokens, top_k, progress=gr.Progress()):
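    # End-to-end setup: load and split the PDFs, build the vector store, then the QA chain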
    list_file_path = [x.name for x in list_file_obj if x is not None]
    collection_name = Path(list_file_path[0]).stem.replace(" ", "-")[:50]
    doc_splits = load_doc(list_file_path, chunk_size, chunk_overlap)
    vector_db = create_db(doc_splits, collection_name)
    qa_chain = initialize_llmchain(
        list_llm[0], 
        llm_temperature, 
        max_tokens, 
        top_k, 
        vector_db, 
        progress)
    return vector_db, collection_name, qa_chain, "Complete!"

def format_chat_history(message, chat_history):
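    # Flatten the (user, bot) history tuples into plain strings; `message` is
    # part of the call signature but not used here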
    formatted_chat_history = []
    for user_message, bot_message in chat_history:
        formatted_chat_history.append(f"User: {user_message}")
        formatted_chat_history.append(f"Assistant: {bot_message}")
    return formatted_chat_history

def conversation(qa_chain, message, history):
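    # Run one retrieval-augmented QA turn and unpack the answer plus top sources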
    formatted_chat_history = format_chat_history(message, history)
    response = qa_chain({"question": message, "chat_history": formatted_chat_history})
    response_answer = response["answer"]
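    # Some instruct models echo the prompt; keep only the text after "Helpful Answer:"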
    if response_answer.find("Helpful Answer:") != -1:
        response_answer = response_answer.split("Helpful Answer:")[-1]
    response_sources = response["source_documents"]
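    # Surface the top three retrieved chunks (the default retriever returns four,
    # so three are assumed to be available)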
    response_source1 = response_sources[0].page_content.strip()
    response_source2 = response_sources[1].page_content.strip()
    response_source3 = response_sources[2].page_content.strip()
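    # PyPDFLoader page metadata is zero-indexed; add 1 for display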
    response_source1_page = response_sources[0].metadata["page"] + 1
    response_source2_page = response_sources[1].metadata["page"] + 1
    response_source3_page = response_sources[2].metadata["page"] + 1
    new_history = history + [(message, response_answer)]
    return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page, response_source3, response_source3_page

def upload_file(file_obj):
    # Collect the temp-file paths of the uploaded documents
    list_file_path = []
    for file in file_obj:
        list_file_path.append(file.name)
    return list_file_path

def demo():
    with gr.Blocks(theme="base") as demo:
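        # Session state shared across Gradio callbacks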
        vector_db = gr.State()
        qa_chain = gr.State()
        collection_name = gr.State()

        gr.Markdown(
        """<center><h2>PDF-based chatbot (powered by LangChain and open-source LLMs)</center></h2>
        <h3>Ask any questions about your PDF documents, along with follow-ups</h3>
        <b>Note:</b> This AI assistant performs retrieval-augmented generation from your PDF documents. \
        When generating answers, it takes past questions into account (via conversational memory), and includes document references for clarity purposes.</i>
        <br><b>Warning:</b> This space uses the free CPU Basic hardware from Hugging Face. Some steps and LLM models used below (free inference endpoints) can take some time to generate an output.<br>
        """)
        with gr.Tab("Chatbot"):
            with gr.Row():
                document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload your PDF documents (single or multiple)")
                db_btn = gr.Button("Generate vector database...")
            with gr.Accordion("Advanced options - Document text splitter", open=False):
                with gr.Row():
                    slider_chunk_size = gr.Slider(minimum=100, maximum=1000, value=600, step=20, label="Chunk size", info="Number of characters per chunk", interactive=True)
                with gr.Row():
                    slider_chunk_overlap = gr.Slider(minimum=10, maximum=200, value=40, step=10, label="Chunk overlap", info="Characters of overlap between consecutive chunks", interactive=True)
            with gr.Row():
                db_progress = gr.Textbox(label="Vector database initialization", value="None")
            with gr.Row():
                llm_btn = gr.Radio(list_llm_simple, label="LLM models", value=list_llm_simple[0], type="index", info="Choose your LLM model")
            with gr.Accordion("Advanced options - LLM model", open=False):
                with gr.Row():
                    slider_temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.7, step=0.1, label="Temperature", info="Model temperature", interactive=True)
                with gr.Row():
                    # Completed from the surrounding slider pattern; range and default values are assumed
                    slider_maxtokens = gr.Slider(minimum=224, maximum=4096, value=1024, step=32, label="Max Tokens", info="Model max tokens", interactive=True)