File size: 3,412 Bytes
5f0df75
 
 
cb89ece
8ea257e
 
 
 
5f0df75
 
 
 
 
 
 
 
 
 
8ea257e
5f0df75
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
import gradio as gr
from utils import *

# Default system-instruction prompt shipped alongside the app; seeds the
# editable "System instruction" textbox below.
file_path = '../instructions.txt'

with open(file_path, encoding='utf-8') as prompt_file:
    instruction_text = prompt_file.read()

# Build the Gradio UI: left panel handles document upload / vector-store
# creation and the generation tuning parameters; right panel hosts the
# model loader and the chat interface. All callbacks come from `utils`.
with gr.Blocks(gr.themes.Soft(primary_hue=gr.themes.colors.slate, secondary_hue=gr.themes.colors.purple)) as demo:
    with gr.Row():

        with gr.Column(scale=1, variant='panel'):
            # Document upload + vector-store creation controls.
            files = gr.File(type="filepath", file_count="multiple")
            with gr.Row(equal_height=True):
                vector_index_btn = gr.Button('Create vector store', variant='primary', scale=1)
                # Fixed typo in user-facing placeholder ("vectore" -> "vector").
                vector_index_msg_out = gr.Textbox(show_label=False, lines=1, scale=1, placeholder="Creating vector store ...")

            # System prompt, pre-filled from instructions.txt and user-editable.
            instruction = gr.Textbox(label="System instruction", lines=3, value=instruction_text)

            with gr.Accordion(label="Text generation tuning parameters"):
                temperature = gr.Slider(label="temperature", minimum=0.1, maximum=1, value=0.1, step=0.05)
                max_new_tokens = gr.Slider(label="max_new_tokens", minimum=1, maximum=4096, value=1024, step=1)
                # Number of retrieved context chunks passed to the model.
                k_context = gr.Slider(label="k_context", minimum=1, maximum=15, value=5, step=1)

            vector_index_btn.click(upload_and_create_vector_store, inputs=[files], outputs=vector_index_msg_out)

        with gr.Column(scale=1, variant='panel'):
            with gr.Row(equal_height=True):
                model_load_btn = gr.Button('Load model', variant='primary', scale=2)
                load_success_msg = gr.Textbox(show_label=False, lines=1, placeholder="Model loading ...")

            chatbot = gr.Chatbot([], elem_id="chatbot", label='Chatbox', height=600)

            txt = gr.Textbox(label="Question", lines=2, placeholder="Enter your question and press shift+enter ")

            with gr.Row():
                with gr.Column(scale=1):
                    submit_btn = gr.Button('Submit', variant='primary', size='sm')
                with gr.Column(scale=1):
                    clear_btn = gr.Button('Clear', variant='stop', size='sm')

            # Event wiring. `load_models` reports into load_success_msg.
            model_load_btn.click(load_models, [], load_success_msg, api_name="load_models")

            # Submitting a question (Enter in the textbox or the Submit button)
            # first echoes it into the chat via `add_text`, then chains `bot`
            # with the current instruction and tuning-slider values. The button
            # path additionally frees GPU memory afterwards via clear_cuda_cache.
            txt.submit(add_text, [chatbot, txt], [chatbot, txt]).then(
                bot, [chatbot, instruction, temperature, max_new_tokens, k_context], chatbot)
            submit_btn.click(add_text, [chatbot, txt], [chatbot, txt]).then(
                bot, [chatbot, instruction, temperature, max_new_tokens, k_context], chatbot).then(
                    clear_cuda_cache, None, None
                )

            # Reset the chat history without queueing.
            clear_btn.click(lambda: None, None, chatbot, queue=False)

if __name__ == '__main__':
    # demo.queue(concurrency_count=3)
    # Bind to all interfaces (0.0.0.0) so the app is reachable from outside a
    # container/VM; API schema page is disabled via show_api=False.
    demo.launch(server_name="0.0.0.0", server_port=7860, show_api=False)