jchen8000 committed
Commit f88614f · verified · 1 Parent(s): ed2063e

Update app.py

Files changed (1)
  1. app.py +17 -30
app.py CHANGED
@@ -19,50 +19,38 @@ print(f"Pyton version {sys.version}.")
  vector_store = None
 
  # Sample PDF file
- sample_filenames = ["Attention Is All You Need.pdf",
-                     "Generative Adversarial Nets.pdf",
-                     "Parameter-Efficient Transfer Learning for NLP.pdf",
                      ]
 
- sample_desc = """
- ### 1. Attention Is All You Need (Vaswani et al., 2017)
- This groundbreaking paper introduced the **Transformer** architecture. It revolutionized natural language processing by enabling parallelization and significantly improving performance on tasks like translation, leading to models like *BERT* and *GPT*.
- 
- ### 2. Generative Adversarial Nets (Goodfellow et al., 2014)
- This paper proposed **GANs**, a novel framework for generative modeling using two neural networks—a generator and a discriminator—that compete in a zero-sum game.
- 
- ### 3. Parameter-Efficient Transfer Learning for NLP (Houlsby et al., 2019)
- This paper introduces **adapter modules**, a method for fine-tuning large pre-trained language models with significantly fewer parameters.
- 
- It could take several minutes to load and index the files.
- """
- 
- rag_desc = """
  ### This is a Demo of Retrieval-Augmented Generation (RAG)
 
  **RAG** is an approach that combines retrieval-based and generative LLM models to improve the accuracy and relevance of generated text.
  It works by first retrieving relevant documents from an external knowledge source (like PDF files) and then using a LLM model to produce responses based on both the input query and the retrieved content.
  This method enhances factual correctness and allows the model to access up-to-date or domain-specific information without retraining.
 
  """
 
- examples_questions = [["What is Transformer?"],
-                       ["What is Attention?"],
-                       ["What is Scaled Dot-Product Attention?"],
-                       ["What are Encoder and Decoder?"],
-                       ["Describe more about the Transformer."],
-                       ["Why use self-attention?"],
-                       ["Describe Parameter-Efficient fine-tuning?"],
-                       ["Describe Generative Adversarial Networks?"],
-                       ["How does GAN work?"]
                        ]
 
  template = \
  """Use the following pieces of context to answer the question at the end.
- If you don't know the answer, just say that you don't know, don't try to make up an answer.
- Always say "Thanks for asking!" at the end of the answer.
 
  {context}
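Note: the description and template in this hunk are consumed by a retrieve-then-prompt step elsewhere in app.py that this diff does not touch. A minimal, hypothetical sketch of that step, assuming a LangChain-style vector store and the module-level vector_store and template shown above (the function name and parameters are illustrative, not from the commit):

def retrieve_and_prompt(question, k=4):
    # similarity_search is the standard LangChain vector-store call; whether
    # app.py uses it directly or through a retriever/chain is an assumption.
    docs = vector_store.similarity_search(question, k=k)
    context = "\n\n".join(d.page_content for d in docs)
    # str.format ignores unused keyword arguments, so passing question is safe
    # even if the template only contains a {context} placeholder.
    return template.format(context=context, question=question)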
 
@@ -155,13 +143,12 @@ additional_inputs = [
  # Create the Gradio interface
  with gr.Blocks(theme="Nymbo/Alyx_Theme") as demo:
      with gr.Tab("Indexing"):
-         gr.Markdown(rag_desc)
          # pdf_input = gr.File(label="Upload PDF", file_types=[".pdf"])
          # pdf_input = gr.Textbox(label="PDF File")
          # index_button = gr.Button("Index PDF")
          # load_sample = gr.Button("Alternatively, Load and Index [Attention Is All You Need.pdf] as a Sample")
          load_sample = gr.Button("Load and Index the following three papers as a RAG Demo")
-         sample_description = gr.Markdown(sample_desc)
          index_output = gr.Textbox(label="Indexing Status")
          # index_button.click(index_pdf, inputs=pdf_input, outputs=index_output)
          load_sample.click(load_sample_pdf, inputs=None, outputs=index_output)
 
  vector_store = None
 
  # Sample PDF file
+ sample_filenames = ["Installation.pdf",
+                     "User Guide.pdf",
                      ]
 
+ desc = """
  ### This is a Demo of Retrieval-Augmented Generation (RAG)
 
  **RAG** is an approach that combines retrieval-based and generative LLM models to improve the accuracy and relevance of generated text.
  It works by first retrieving relevant documents from an external knowledge source (like PDF files) and then using a LLM model to produce responses based on both the input query and the retrieved content.
  This method enhances factual correctness and allows the model to access up-to-date or domain-specific information without retraining.
 
+ Click the button below to load a **User Guide** and an **Installation Guide** for the smoke alarm device into the vector database.
+ Once you see the message *"PDF indexed successfully!"*, go to the **Chatbot** tab to ask any relevant questions about the device.
 
  """
 
+ examples_questions = [["How long is the lifespan of this smoke alarm?"],
+                       ["How often should I change the battery?"],
+                       ["Where should I install the smoke alarm in my home?"],
+                       ["How do I test if the smoke alarm is working?"],
+                       ["What should I do if the smoke alarm keeps beeping?"],
+                       ["Can this smoke alarm detect carbon monoxide too?"],
+                       ["How do I clean the smoke alarm properly?"],
+                       ["What type of battery does this smoke alarm use?"],
+                       ["How loud is the smoke alarm when it goes off?"],
+                       ["Can I install this smoke alarm on a wall instead of a ceiling?"],
                        ]
 
  template = \
  """Use the following pieces of context to answer the question at the end.
+ If you don't know the answer, just say you don't know because no relevant information in the provided documents, don't try to make up an answer.
 
  {context}
 
  # Create the Gradio interface
  with gr.Blocks(theme="Nymbo/Alyx_Theme") as demo:
      with gr.Tab("Indexing"):
+         gr.Markdown(desc)
          # pdf_input = gr.File(label="Upload PDF", file_types=[".pdf"])
          # pdf_input = gr.Textbox(label="PDF File")
          # index_button = gr.Button("Index PDF")
          # load_sample = gr.Button("Alternatively, Load and Index [Attention Is All You Need.pdf] as a Sample")
          load_sample = gr.Button("Load and Index the following three papers as a RAG Demo")
          index_output = gr.Textbox(label="Indexing Status")
          # index_button.click(index_pdf, inputs=pdf_input, outputs=index_output)
          load_sample.click(load_sample_pdf, inputs=None, outputs=index_output)
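One note on the unchanged wiring at the end of this hunk: with inputs=None, Gradio calls load_sample_pdf with no arguments and routes its return value into the index_output textbox. A self-contained toy example of the same pattern (the button label and stub body are placeholders, not code from app.py):

import gradio as gr

def load_sample_pdf():
    # Stub standing in for the real indexing work in app.py.
    return "PDF indexed successfully!"

with gr.Blocks() as demo:
    load_sample = gr.Button("Load and Index")
    index_output = gr.Textbox(label="Indexing Status")
    # inputs=None -> the callback takes no arguments; its return value
    # becomes the new value of index_output.
    load_sample.click(load_sample_pdf, inputs=None, outputs=index_output)

demo.launch()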