import os
import sys
import random
import gradio as gr
from langchain_community.document_loaders import PyPDFLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.chains import RetrievalQA
from langchain_groq import ChatGroq
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
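# Required packages (pip): gradio, pypdf, faiss-cpu, sentence-transformers,
# langchain, langchain-community, langchain-text-splitters,
# langchain-huggingface, langchain-groq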


print(f"Python version {sys.version}.")

# Initialize the FAISS vector store
vector_store = None

# Sample PDF files
sample_filenames = ["Installation.pdf",
                    "User Guide.pdf",
                    ]

desc = """
### This is a Demo of Retrieval-Augmented Generation (RAG)

**RAG** is an approach that combines retrieval with generative large language models (LLMs) to improve the accuracy and relevance of generated text.  
It works by first retrieving relevant documents from an external knowledge source (such as PDF files) and then using an LLM to produce responses based on both the input query and the retrieved content.  
This method improves factual correctness and lets the model use up-to-date or domain-specific information without retraining.

Click the button below to load a **User Guide** and an **Installation Guide** for a smoke alarm device into the vector database. Processing can take a couple of minutes.
Once you see the message *"PDF(s) indexed successfully!"*, go to the **Chatbot** tab and ask any relevant question about the device.

You can switch the LLM model under **Additional Inputs** at the bottom of the **Chatbot** tab, in case a model has been retired. You can also adjust the LLM parameters there.

"""

sample_button = "Load User Guide and Installation Guide documents"


examples_questions = [["How long is the lifespan of this smoke alarm?"],
                      ["How often should I change the battery?"],
                      ["Where should I install the smoke alarm in my home?"],
                      ["How do I test if the smoke alarm is working?"],
                      ["What should I do if the smoke alarm keeps beeping?"],
                      ["Can this smoke alarm detect carbon monoxide too?"],
                      ["How do I clean the smoke alarm properly?"],
                      ["What type of battery does this smoke alarm use?"],
                      ["How loud is the smoke alarm when it goes off?"],
                      ["Can I install this smoke alarm on a wall instead of a ceiling?"],
                     ]

template = \
"""Use the following pieces of context to answer the question at the end.
If you don't know the answer, just say that you don't know because there is no relevant information in the provided documents; don't try to make up an answer.

{context}

Question: {question}

Answer:
"""

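# PromptTemplate.from_template() (used in generate_response below) infers the
# input variables, here {context} and {question}, from the braces above.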
# Function to handle PDF upload and indexing
def index_pdf(pdf):
    global vector_store
    
    # Load the PDF
    loader = PyPDFLoader(pdf.name)
    documents = loader.load()

    # Split the documents into chunks
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    texts = text_splitter.split_documents(documents)

    # Embed the chunks 
    embeddings = HuggingFaceEmbeddings(model_name="bert-base-uncased", encode_kwargs={"normalize_embeddings": True})
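    # Note: bert-base-uncased is a general-purpose language model, not one
    # trained for sentence similarity; a dedicated embedding model such as
    # sentence-transformers/all-MiniLM-L6-v2 would likely retrieve better.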

    # Store the embeddings in the vector store
    vector_store = FAISS.from_documents(texts, embeddings)

    return "PDF(s) indexed successfully!"

def load_sample_pdf():
    global vector_store
    documents = []    

    # Load the PDFs
    for file in sample_filenames:
        loader = PyPDFLoader(file)
        documents.extend(loader.load())
        # print(f"{file} is processed!")

    # Split the documents into chunks
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1024, chunk_overlap=64)
    texts = text_splitter.split_documents(documents)

    # Embed the chunks 
    embeddings = HuggingFaceEmbeddings(model_name="bert-base-uncased", encode_kwargs={"normalize_embeddings": True})

    # Store the embeddings in the vector store
    vector_store = FAISS.from_documents(texts, embeddings)

    return "PDF(s) indexed successfully!"


def format_docs(docs):
    return "\n\n".join(doc.page_content for doc in docs)

def generate_response(query, history, model, temperature, max_tokens, top_p, seed):
  
    if vector_store is None:
        return "Please load and index the documents on the Indexing tab first."

    if seed == 0:
        seed = random.randint(1, 100000)

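    # Fetch the 16 chunks most similar to the query; format_docs() joins them
    # into the {context} block of the prompt.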
    retriever = vector_store.as_retriever(search_type="similarity", search_kwargs={"k": 16})
    llm = ChatGroq(
        groq_api_key=os.environ.get("GROQ_API_KEY"),
        model=model,
        temperature=temperature,
        max_tokens=max_tokens,
        # Forward the remaining sampling controls to the Groq API.
        model_kwargs={"top_p": top_p, "seed": seed},
    )
    custom_rag_prompt = PromptTemplate.from_template(template)

    # The lines below fed the retrieved-info panel that is commented out in
    # the UI; gr.ChatInterface expects a single string, so only the chain
    # response is returned.
    # docs = retriever.invoke(query)
    # relevant_info = format_docs(docs)
    
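    # LCEL pipeline: the dict runs both branches on the same input query.
    # "retriever | format_docs" builds the {context} string, while
    # RunnablePassthrough() forwards the raw query as {question}; the filled
    # prompt then goes to the LLM and StrOutputParser() extracts the text.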
    rag_chain = (
        {"context": retriever | format_docs, "question": RunnablePassthrough()}
        | custom_rag_prompt
        | llm
        | StrOutputParser()
    )

    response = rag_chain.invoke(query)

    return response

additional_inputs = [
    gr.Dropdown(choices=["llama-3.3-70b-versatile", "llama-3.1-8b-instant", "llama3-70b-8192", "llama3-8b-8192", "mixtral-8x7b-32768", "gemma2-9b-it"], value="gemma2-9b-it", label="Model"),
    gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.5, label="Temperature", info="Controls diversity of the generated text. Lower is more deterministic, higher is more creative."),
    gr.Slider(minimum=1, maximum=8000, step=1, value=8000, label="Max Tokens", info="The maximum number of tokens the model can generate in a single response.<br>Maximums: 8k for gemma2 9b it and llama3 8b & 70b, 32k for mixtral 8x7b, 128k for llama 3.1."),
    gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.5, label="Top P", info="Nucleus sampling: the model considers only the smallest set of most probable next tokens whose cumulative probability reaches p."),
    gr.Number(precision=0, value=0, label="Seed", info="A starting point to initiate generation, use 0 for random")
]
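# gr.ChatInterface passes these controls, in order, as the arguments that
# follow (message, history) when it calls generate_response.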


# Create the Gradio interface
with gr.Blocks(theme=gr.themes.Default()) as demo:
    with gr.Tab("Indexing"):
        gr.Markdown(desc)
        # pdf_input = gr.File(label="Upload PDF", file_types=[".pdf"])
        # pdf_input = gr.Textbox(label="PDF File")
        # index_button = gr.Button("Index PDF")
        # load_sample = gr.Button("Alternatively, Load and Index [Attention Is All You Need.pdf] as a Sample")
        load_sample = gr.Button(sample_button)
        index_output = gr.Textbox(label="Indexing Status")
        # index_button.click(index_pdf, inputs=pdf_input, outputs=index_output)
        load_sample.click(load_sample_pdf, inputs=None, outputs=index_output)
    
    with gr.Tab("Chatbot"):
        with gr.Row():
            with gr.Column():
                gr.ChatInterface(
                    fn=generate_response, 
                    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
                    examples=examples_questions,
                    additional_inputs=additional_inputs,
                    cache_examples=False,
                )
            # with gr.Column():
            #     retrieve_button = gr.Button("Retrieve Relevant Info")
            #     relevant_info = gr.Textbox(
            #         label="Retrieved Information",
            #         interactive=False,
            #         lines=20, 
            #     )

            
# Launch the Gradio app
demo.launch(share=True)