Gainward777 committed on
Commit
ac3132e
·
verified ·
1 Parent(s): 5db687f

Update ui/gradio_ui.py

Browse files
Files changed (1) hide show
  1. ui/gradio_ui.py +88 -88
ui/gradio_ui.py CHANGED
@@ -1,88 +1,88 @@
1
- import gradio as gr
2
- from ChatErector import conversation, initializer
3
-
4
- def ui():
5
- # with gr.Blocks(theme=gr.themes.Default(primary_hue="sky")) as demo:
6
- with gr.Blocks(theme=gr.themes.Default(primary_hue="red", secondary_hue="pink", neutral_hue = "sky")) as ui:
7
- #vector_db = gr.State()
8
- qa_chain = gr.State()
9
- gr.HTML("<center><h1>RAG PDF chatbot</h1><center>")
10
- gr.Markdown("""<b>Query your PDF documents!</b> This AI agent is designed to perform retrieval augmented generation (RAG) on PDF documents. The app is hosted on Hugging Face Hub for the sole purpose of demonstration. \
11
- <b>Please do not upload confidential documents.</b>
12
- """)
13
- with gr.Row():
14
- with gr.Column(scale = 86):
15
- gr.Markdown("<b>Step 1 - Upload PDF documents and Initialize RAG pipeline</b>")
16
- with gr.Row():
17
- document = gr.Files(height=300, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload PDF documents")
18
- #with gr.Row():
19
- #db_btn = gr.Button("Create vector database")
20
- #with gr.Row():
21
- #db_progress = gr.Textbox(value="Not initialized", show_label=False) # label="Vector database status",
22
- #gr.Markdown("<style>body { font-size: 16px; }</style><b>Advanced settings</b>")
23
- #with gr.Row():
24
- #llm_btn = gr.Radio(list_llm_simple, label="Available LLMs", value = list_llm_simple[0], type="index") # info="Select LLM", show_label=False
25
- with gr.Row():
26
- with gr.Accordion("Advanced settings", open=False):
27
- with gr.Row():
28
- slider_temperature = gr.Slider(minimum = 0.01, maximum = 1.0, value=0.5, step=0.1, label="Temperature", info="Controls randomness in token generation", interactive=True)
29
- with gr.Row():
30
- slider_maxtokens = gr.Slider(minimum = 128, maximum = 9192, value=4096, step=128, label="Max New Tokens", info="Maximum number of tokens to be generated",interactive=True)
31
- with gr.Row():
32
- slider_topk = gr.Slider(minimum = 1, maximum = 10, value=3, step=1, label="top-k", info="Number of tokens to select the next token from", interactive=True)
33
- with gr.Row():
34
- thold = gr.Slider(minimum = 0.01, maximum = 1.0, value=0.8, step=0.1, label="Treshold", info="Retrieved information relevance level", interactive=True)
35
- with gr.Row():
36
- qachain_btn = gr.Button("Initialize Question Answering Chatbot")
37
- with gr.Row():
38
- llm_progress = gr.Textbox(value="Not initialized", show_label=False) # label="Chatbot status",
39
-
40
- with gr.Column(scale = 200):
41
- gr.Markdown("<b>Step 2 - Chat with your Document</b>")
42
- chatbot = gr.Chatbot(height=505)
43
- #with gr.Accordion("Relevent context from the source document", open=False):
44
- #with gr.Row():
45
- #doc_source1 = gr.Textbox(label="Reference 1", lines=2, container=True, scale=20)
46
- #source1_page = gr.Number(label="Page", scale=1)
47
- #with gr.Row():
48
- #doc_source2 = gr.Textbox(label="Reference 2", lines=2, container=True, scale=20)
49
- #source2_page = gr.Number(label="Page", scale=1)
50
- #with gr.Row():
51
- #doc_source3 = gr.Textbox(label="Reference 3", lines=2, container=True, scale=20)
52
- #source3_page = gr.Number(label="Page", scale=1)
53
- with gr.Row():
54
- msg = gr.Textbox(placeholder="Ask a question", container=True)
55
- with gr.Row():
56
- submit_btn = gr.Button("Submit")
57
- clear_btn = gr.ClearButton([msg, chatbot], value="Clear")
58
-
59
- # Preprocessing events
60
- #db_btn.click(initialize_database, \
61
- #inputs=[document], \
62
- #outputs=[vector_db, db_progress])
63
- qachain_btn.click(initializer, \
64
- inputs=[document, slider_temperature, slider_maxtokens, slider_topk, thold], \
65
- outputs=[qa_chain, llm_progress]).then(lambda:[None,"",0,"",0,"",0], \
66
- inputs=None, \
67
- outputs=[chatbot] \
68
- queue=False)
69
-
70
- # Chatbot events
71
- msg.submit(conversation, \
72
- inputs=[qa_chain, msg, chatbot], \
73
- outputs=[qa_chain, msg, chatbot], \
74
- queue=False)
75
- submit_btn.click(conversation, \
76
- inputs=[qa_chain, msg, chatbot], \
77
- outputs=[qa_chain, msg, chatbot], \
78
- queue=False)
79
- clear_btn.click(lambda:[None,"",0,"",0,"",0], \
80
- inputs=None, \
81
- outputs=[chatbot], \
82
- queue=False)
83
- ui.queue().launch(debug=True)
84
-
85
-
86
-
87
-
88
-
 
import gradio as gr
from ChatErector import conversation, initializer


def ui():
    """Build and launch the Gradio front-end for the RAG PDF chatbot.

    Left column: upload PDF documents, tune generation settings
    (temperature, max new tokens, top-k, retrieval threshold) and
    initialize the question-answering chain via ``initializer``.
    Right column: the chat panel; ``conversation`` handles each turn.
    Blocks in ``launch`` until the (queued) app is shut down.
    """
    with gr.Blocks(theme=gr.themes.Default(primary_hue="red", secondary_hue="pink", neutral_hue="sky")) as ui:
        # QA chain produced by `initializer`, consumed by `conversation`.
        qa_chain = gr.State()

        # NOTE(review): second <center> should probably be </center>; kept
        # as-is because browsers tolerate it and it is user-visible markup.
        gr.HTML("<center><h1>RAG PDF chatbot</h1><center>")
        gr.Markdown(
            """<b>Query your PDF documents!</b> This AI agent is designed to perform retrieval augmented generation (RAG) on PDF documents. The app is hosted on Hugging Face Hub for the sole purpose of demonstration. \
<b>Please do not upload confidential documents.</b>
"""
        )

        with gr.Row():
            with gr.Column(scale=86):
                gr.Markdown("<b>Step 1 - Upload PDF documents and Initialize RAG pipeline</b>")
                with gr.Row():
                    document = gr.Files(height=300, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload PDF documents")
                with gr.Row():
                    with gr.Accordion("Advanced settings", open=False):
                        with gr.Row():
                            slider_temperature = gr.Slider(minimum=0.01, maximum=1.0, value=0.5, step=0.1, label="Temperature", info="Controls randomness in token generation", interactive=True)
                        with gr.Row():
                            slider_maxtokens = gr.Slider(minimum=128, maximum=9192, value=4096, step=128, label="Max New Tokens", info="Maximum number of tokens to be generated", interactive=True)
                        with gr.Row():
                            slider_topk = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="top-k", info="Number of tokens to select the next token from", interactive=True)
                        with gr.Row():
                            # NOTE(review): label typo "Treshold" kept to preserve
                            # the existing UI text; consider "Threshold".
                            thold = gr.Slider(minimum=0.01, maximum=1.0, value=0.8, step=0.1, label="Treshold", info="Retrieved information relevance level", interactive=True)
                with gr.Row():
                    qachain_btn = gr.Button("Initialize Question Answering Chatbot")
                with gr.Row():
                    llm_progress = gr.Textbox(value="Not initialized", show_label=False)

            with gr.Column(scale=200):
                gr.Markdown("<b>Step 2 - Chat with your Document</b>")
                chatbot = gr.Chatbot(height=505)
                with gr.Row():
                    msg = gr.Textbox(placeholder="Ask a question", container=True)
                with gr.Row():
                    submit_btn = gr.Button("Submit")
                    clear_btn = gr.ClearButton([msg, chatbot], value="Clear")

        # Initialize the QA chain, then reset the chat panel.
        # FIX: the reset lambda used to return a 7-element list
        # `[None,"",0,"",0,"",0]` — a leftover from a template that also
        # had three reference-source textboxes and page numbers — while
        # `outputs` lists a single component. One output component
        # requires exactly one returned value.
        qachain_btn.click(
            initializer,
            inputs=[document, slider_temperature, slider_maxtokens, slider_topk, thold],
            outputs=[qa_chain, llm_progress],
        ).then(
            lambda: None,
            inputs=None,
            outputs=[chatbot],
            queue=False,
        )

        # Chatbot events: Enter in the textbox and the Submit button
        # share the same handler and wiring.
        msg.submit(
            conversation,
            inputs=[qa_chain, msg, chatbot],
            outputs=[qa_chain, msg, chatbot],
            queue=False,
        )
        submit_btn.click(
            conversation,
            inputs=[qa_chain, msg, chatbot],
            outputs=[qa_chain, msg, chatbot],
            queue=False,
        )
        # ClearButton already clears [msg, chatbot]; this handler resets
        # the chatbot component value itself (one output -> one value).
        clear_btn.click(
            lambda: None,
            inputs=None,
            outputs=[chatbot],
            queue=False,
        )

    ui.queue().launch(debug=True)