import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import time
import os
import openai

# Load the Vicuna 7B v1.3 LMSys model and tokenizer
model_name = "lmsys/vicuna-7b-v1.3"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
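
# Optional: move the model to GPU when one is available. A minimal sketch,
# assuming torch (already required by transformers) can see a CUDA device;
# on CPU-only machines this keeps the model where it is.
import torch
model = model.to("cuda" if torch.cuda.is_available() else "cpu")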

template_single = '''Please output any <{}> in the following sentence one per line without any additional text: "{}"'''
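# For illustration, template_single.format("Noun", "The cat sat.") produces:
#   Please output any <Noun> in the following sentence one per line without any additional text: "The cat sat."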

# API key: read from the environment rather than hardcoding a secret in source.
openai.api_key = os.environ.get('OPENAI_API_TOKEN')

def chat(system_prompt, user_prompt, model='gpt-3.5-turbo', temperature=0, verbose=False):
    '''Single-turn call to the OpenAI chat API (openai<1.0 interface, where ChatCompletion still exists).'''
    response = openai.ChatCompletion.create(
        temperature=temperature,
        model=model,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
    )

    res = response['choices'][0]['message']['content']

    if verbose:
        print('System prompt:', system_prompt)
        print('User prompt:', user_prompt)
        print('GPT response:', res)

    return res
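
# A minimal retry sketch around chat(), assuming the openai<1.0 error classes;
# the retry count and backoff values here are illustrative additions, not part
# of the original app.
def chat_with_retry(system_prompt, user_prompt, retries=3, backoff=2.0):
    for attempt in range(retries):
        try:
            return chat(system_prompt, user_prompt)
        except openai.error.RateLimitError:
            if attempt == retries - 1:
                raise
            time.sleep(backoff * (attempt + 1))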

def format_chat_prompt(message, chat_history, max_convo_length):
    prompt = ""
    for turn in chat_history[-max_convo_length:]:
        user_message, bot_message = turn
        prompt = f"{prompt}\nUser: {user_message}\nAssistant: {bot_message}"
    prompt = f"{prompt}\nUser: {message}\nAssistant:"
    return prompt
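
# For example, format_chat_prompt("Bye", [("Hi", "Hello!")], 10) returns:
#   "\nUser: Hi\nAssistant: Hello!\nUser: Bye\nAssistant:"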

def respond_gpt(tab_name, message, chat_history, max_convo_length=10):
    formatted_prompt = format_chat_prompt(message, chat_history, max_convo_length)
    print('Prompt + Context:')
    print(formatted_prompt)
    bot_message = chat(system_prompt=f'''Generate the output only for the assistant. Please output any <{tab_name}> in the following sentence one per line without any additional text.''',
                       user_prompt=formatted_prompt)
    chat_history.append((message, bot_message))
    return "", chat_history

def respond(message, chat_history):
    # Generate a reply with the local Vicuna model; keep inputs on the model's device.
    input_ids = tokenizer.encode(message, return_tensors="pt").to(model.device)
    # max_new_tokens bounds only the reply, so long prompts are not cut off.
    output_ids = model.generate(input_ids, max_new_tokens=50, num_beams=5, no_repeat_ngram_size=2)
    bot_message = tokenizer.decode(output_ids[0], skip_special_tokens=True)

    chat_history.append((message, bot_message))
    time.sleep(2)
    return "", chat_history

def interface(tab_name):
    gr.Markdown(" Description ")

    textbox_prompt = gr.Textbox(show_label=False, placeholder="Write a prompt and press enter")
    # Collected for future use; the handlers below do not read it yet.
    api_key = gr.Textbox(label="OpenAI Key", placeholder="Enter your OpenAI key here", type="password")
    btn = gr.Button("Submit")

    gr.Markdown("Strategy 1: QA-Based Prompting")
    with gr.Row():
        vicuna_S1_chatbot = gr.Chatbot(label="vicuna-7b")
        llama_S1_chatbot = gr.Chatbot(label="llama-7b")
        gpt_S1_chatbot = gr.Chatbot(label="gpt-3.5")
    clear = gr.ClearButton(components=[textbox_prompt, vicuna_S1_chatbot])
    gr.Markdown("Strategy 2: Instruction-Based Prompting")
    with gr.Row():
        vicuna_S2_chatbot = gr.Chatbot(label="vicuna-7b")
        llama_S2_chatbot = gr.Chatbot(label="llama-7b")
        gpt_S2_chatbot = gr.Chatbot(label="gpt-3.5")
    clear = gr.ClearButton(components=[textbox_prompt, vicuna_S2_chatbot])
    gr.Markdown("Strategy 3: Structured Prompting")
    with gr.Row():
        vicuna_S3_chatbot = gr.Chatbot(label="vicuna-7b")
        llama_S3_chatbot = gr.Chatbot(label="llama-7b")
        gpt_S3_chatbot = gr.Chatbot(label="gpt-3.5")
    clear = gr.ClearButton(components=[textbox_prompt, vicuna_S3_chatbot])

    textbox_prompt.submit(respond, inputs=[textbox_prompt, vicuna_S1_chatbot], outputs=[textbox_prompt, vicuna_S1_chatbot])
    textbox_prompt.submit(respond, inputs=[textbox_prompt, vicuna_S2_chatbot], outputs=[textbox_prompt, vicuna_S2_chatbot])
    textbox_prompt.submit(respond, inputs=[textbox_prompt, vicuna_S3_chatbot], outputs=[textbox_prompt, vicuna_S3_chatbot])

    # tab_name is a plain string, not a component, so bind it via a closure
    # instead of passing it through the gradio inputs/outputs lists.
    btn.click(lambda message, chat_history: respond_gpt(tab_name, message, chat_history),
              inputs=[textbox_prompt, gpt_S1_chatbot],
              outputs=[textbox_prompt, gpt_S1_chatbot])

with gr.Blocks() as demo:
    gr.Markdown("# LLM Evaluator With Linguistic Scrutiny")

    with gr.Tab("Noun"):
        interface("Noun")
    
    with gr.Tab("Determiner"):
        gr.Markdown(" Description ")

        prompt_CHUNK = gr.Textbox(show_label=False, placeholder="Write a prompt and press enter")

        gr.Markdown("Strategy 1 QA")
        with gr.Row():
            vicuna_S1_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
            llama_S1_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
            gpt_S1_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
        clear = gr.ClearButton([prompt_CHUNK, vicuna_S1_chatbot_CHUNK])
        gr.Markdown("Strategy 2 Instruction")
        with gr.Row():
            vicuna_S2_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
            llama_S2_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
            gpt_S2_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
        clear = gr.ClearButton([prompt_CHUNK, vicuna_S2_chatbot_CHUNK])
        gr.Markdown("Strategy 3 Structured Prompting")
        with gr.Row():
            vicuna_S3_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
            llama_S3_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
            gpt_S3_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
        clear = gr.ClearButton([prompt_CHUNK, vicuna_S3_chatbot_CHUNK])
    
    with gr.Tab("Noun phrase"):
        interface("Noun phrase")
    with gr.Tab("Verb phrase"):
        interface("Verb phrase")
    with gr.Tab("Dependent clause"):
        interface("Dependent clause")
    with gr.Tab("T-units"):
        interface("T-units")

    prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S1_chatbot_CHUNK], [prompt_CHUNK, vicuna_S1_chatbot_CHUNK])
    prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S2_chatbot_CHUNK], [prompt_CHUNK, vicuna_S2_chatbot_CHUNK])
    prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S3_chatbot_CHUNK], [prompt_CHUNK, vicuna_S3_chatbot_CHUNK])

demo.launch()