import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load Vicuna 7B model and tokenizer
model_name = "lmsys/vicuna-7b-v1.3"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
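
# Note: full-precision weights for a 7B model take roughly 28 GB of memory.
# If a GPU is available, loading in half precision is a common option
# (a sketch, not part of the original; assumes torch is installed and
# `accelerate` is available for device_map="auto"):
#   import torch
#   model = AutoModelForCausalLM.from_pretrained(
#       model_name, torch_dtype=torch.float16, device_map="auto"
#   )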

with gr.Blocks() as demo:
    gr.Markdown("# LLM Evaluator With Linguistic Scrutiny")

    with gr.Tab("POS"):
        gr.Markdown(" Description ")

        with gr.Row():
            prompt_POS = gr.Textbox(show_label=False, placeholder="Enter prompt")
            send_button_POS = gr.Button("Send", scale=0)

        gr.Markdown("Strategy 1 QA")
        with gr.Row():
            vicuna_chatbot1_POS = gr.Chatbot(label="vicuna-7b", live=True)
            llama_chatbot1_POS = gr.Chatbot(label="llama-7b", live=False)
            gpt_chatbot1_POS = gr.Chatbot(label="gpt-3.5", live=False)
        gr.Markdown("Strategy 2 Instruction")
        with gr.Row():
            vicuna_chatbot2_POS = gr.Chatbot(label="vicuna-7b", live=True)
            llama_chatbot2_POS = gr.Chatbot(label="llama-7b", live=False)
            gpt_chatbot2_POS = gr.Chatbot(label="gpt-3.5", live=False)
        gr.Markdown("Strategy 3 Structured Prompting")
        with gr.Row():
            vicuna_chatbot3_POS = gr.Chatbot(label="vicuna-7b", live=True)
            llama_chatbot3_POS = gr.Chatbot(label="llama-7b", live=False)
            gpt_chatbot3_POS = gr.Chatbot(label="gpt-3.5", live=False)
        
    with gr.Tab("Chunk"):
        gr.Markdown(" Description 2 ")

        with gr.Row():
            prompt_chunk = gr.Textbox(show_label=False, placeholder="Enter prompt")
            send_button_Chunk = gr.Button("Send", scale=0)

        gr.Markdown("Strategy 1 QA")
        with gr.Row():
            vicuna_chatbot1_chunk = gr.Chatbot(label="vicuna-7b", live=True)
            llama_chatbot1_chunk = gr.Chatbot(label="llama-7b", live=False)
            gpt_chatbot1_chunk = gr.Chatbot(label="gpt-3.5", live=False)
        gr.Markdown("Strategy 2 Instruction")
        with gr.Row():
            vicuna_chatbot2_chunk = gr.Chatbot(label="vicuna-7b", live=True)
            llama_chatbot2_chunk = gr.Chatbot(label="llama-7b", live=False)
            gpt_chatbot2_chunk = gr.Chatbot(label="gpt-3.5", live=False)
        gr.Markdown("Strategy 3 Structured Prompting")
        with gr.Row():
            vicuna_chatbot3_chunk = gr.Chatbot(label="vicuna-7b", live=True)
            llama_chatbot3_chunk = gr.Chatbot(label="llama-7b", live=False)
            gpt_chatbot3_chunk = gr.Chatbot(label="gpt-3.5", live=False)
        
        clear = gr.ClearButton([prompt_chunk, vicuna_chatbot1_chunk])

    # Generate a completion for a prompt; decode only the newly generated
    # tokens so the prompt is not echoed back in the response.
    def generate_response(prompt):
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        output_ids = model.generate(
            input_ids, max_new_tokens=500, pad_token_id=tokenizer.eos_token_id
        )
        return tokenizer.decode(
            output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True
        )

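    # Vicuna is a chat-tuned model, so bare prompts can produce weak replies.
    # A minimal sketch of wrapping user input in a conversation template
    # (an assumption: the v1.1-style template that the vicuna-7b-v1.3
    # checkpoint was tuned with; format_vicuna_prompt is a new helper, and
    # generate_response(format_vicuna_prompt(prompt)) would apply it):
    def format_vicuna_prompt(user_message):
        system_msg = (
            "A chat between a curious user and an artificial intelligence "
            "assistant. The assistant gives helpful, detailed, and polite "
            "answers to the user's questions."
        )
        return f"{system_msg} USER: {user_message} ASSISTANT:"
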
    # Define the Gradio handlers. gr.Chatbot holds a list of
    # [user_message, bot_message] pairs, so each handler appends the new
    # exchange to the history it receives and returns the updated history.
    def chatbot_interface_POS(prompt, history):
        response = generate_response(prompt)
        # Responses for the llama-7b and gpt-3.5 columns could be added here.
        return history + [[prompt, response]]

    def chatbot_interface_Chunk(prompt, history):
        response = generate_response(prompt)
        # Responses for the llama-7b and gpt-3.5 columns could be added here.
        return history + [[prompt, response]]

    # Connect the send buttons to the handlers; only the Strategy 1 Vicuna
    # chatbots are wired up here.
    send_button_POS.click(
        chatbot_interface_POS,
        inputs=[prompt_POS, vicuna_chatbot1_POS],
        outputs=vicuna_chatbot1_POS,
    )
    send_button_Chunk.click(
        chatbot_interface_Chunk,
        inputs=[prompt_chunk, vicuna_chatbot1_chunk],
        outputs=vicuna_chatbot1_chunk,
    )

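# For long generations or concurrent users, enabling Gradio's request queue
# before launching is a common option: demo.queue().launch()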
demo.launch()