# LingEval / app.py
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import lftk
import spacy
import time
import os
import openai
# Load the Vicuna 7B model and tokenizer
vicuna_tokenizer = AutoTokenizer.from_pretrained("lmsys/vicuna-7b-v1.3")
vicuna_model = AutoModelForCausalLM.from_pretrained("lmsys/vicuna-7b-v1.3")
# Load the LLaMA 2 7B chat model and tokenizer
llama_tokenizer = AutoTokenizer.from_pretrained("daryl149/llama-2-7b-chat-hf")
llama_model = AutoModelForCausalLM.from_pretrained("daryl149/llama-2-7b-chat-hf")
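# Note: from_pretrained loads each 7B model in full fp32 precision by default
# (roughly 28 GB of RAM per model); passing torch_dtype=torch.float16 on a GPU
# is one common way to shrink the footprint.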
def update_api_key(new_key):
    print("update_api_key ran")
    os.environ['OPENAI_API_TOKEN'] = new_key
    openai.api_key = os.environ['OPENAI_API_TOKEN']

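# Example (hypothetical key shown): update_api_key("sk-...") makes the key
# available to every later chat() call in this process.
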
def chat(system_prompt, user_prompt, model='gpt-3.5-turbo', temperature=0, verbose=False):
    '''Plain call to the OpenAI chat completion API (pre-1.0 openai package).'''
    response = openai.ChatCompletion.create(
        temperature=temperature,
        model=model,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt}
        ])
    res = response['choices'][0]['message']['content']
    if verbose:
        print('System prompt:', system_prompt)
        print('User prompt:', user_prompt)
        print('GPT response:', res)
    return res

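# Example call, assuming a valid key has already been set via update_api_key:
#   chat("You are a POS tagger.", "Tag: The cat sat.", verbose=True)
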
def format_chat_prompt(message, chat_history, max_convo_length):
    '''Flatten the last max_convo_length turns into a single User/Assistant transcript.'''
    prompt = ""
    for turn in chat_history[-max_convo_length:]:
        user_message, bot_message = turn
        prompt = f"{prompt}\nUser: {user_message}\nAssistant: {bot_message}"
    prompt = f"{prompt}\nUser: {message}\nAssistant:"
    return prompt

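# For message="Hello" and chat_history=[("Hi", "Hi there!")], this returns:
#   "\nUser: Hi\nAssistant: Hi there!\nUser: Hello\nAssistant:"
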
def gpt_respond(tab_name, message, chat_history, max_convo_length=10):
    formatted_prompt = format_chat_prompt(message, chat_history, max_convo_length)
    print('Prompt + Context:')
    print(formatted_prompt)
    bot_message = chat(system_prompt=f'''Generate the output only for the assistant. Output any <{tab_name}> in the following sentence one per line without any additional text.''',
                       user_prompt=formatted_prompt)
    chat_history.append((message, bot_message))
    return "", chat_history

def vicuna_respond(tab_name, message, chat_history):
    formatted_prompt = f'''Generate the output only for the assistant. Output any {tab_name} in the following sentence one per line without any additional text: {message}'''
    print('Vicuna Ling Ents Fn - Prompt + Context:')
    print(formatted_prompt)
    input_ids = vicuna_tokenizer.encode(formatted_prompt, return_tensors="pt")
    # Beam-sample decoding (num_beams with do_sample=True), capped at 1024 total tokens
    output_ids = vicuna_model.generate(input_ids, do_sample=True, max_length=1024, num_beams=5, no_repeat_ngram_size=2)
    bot_message = vicuna_tokenizer.decode(output_ids[0], skip_special_tokens=True)
    print(bot_message)
    # Remove the echoed prompt from bot_message so only the completion remains
    bot_message = bot_message.replace(formatted_prompt, '')
    print(bot_message)
    chat_history.append((formatted_prompt, bot_message))
    time.sleep(2)
    return tab_name, "", chat_history

def llama_respond(tab_name, message, chat_history):
    formatted_prompt = f'''Generate the output only for the assistant. Output any {tab_name} in the following sentence one per line without any additional text: {message}'''
    # print('Llama - Prompt + Context:')
    # print(formatted_prompt)
    input_ids = llama_tokenizer.encode(formatted_prompt, return_tensors="pt")
    output_ids = llama_model.generate(input_ids, do_sample=True, max_length=1024, num_beams=5, no_repeat_ngram_size=2)
    bot_message = llama_tokenizer.decode(output_ids[0], skip_special_tokens=True)
    # Remove the echoed prompt from bot_message
    bot_message = bot_message.replace(formatted_prompt, '')
    # print(bot_message)
    chat_history.append((formatted_prompt, bot_message))
    time.sleep(2)
    return tab_name, "", chat_history

def gpt_strategies_respond(have_key, strategy, task_name, task_ling_ent, message, chat_history, max_convo_length=10):
    if have_key == "No":
        return "", chat_history
    formatted_system_prompt = ""
    if task_name == "POS Tagging":
        if strategy == "S1":
            formatted_system_prompt = f'''Generate the output only for the assistant. Output any {task_ling_ent} in the following sentence one per line without any additional text: {message}'''
        elif strategy == "S2":
            formatted_system_prompt = f'''POS tag the following sentence using Universal POS tag set without generating any additional text: {message}'''
        elif strategy == "S3":
            formatted_system_prompt = f'''POS tag the following sentence using Universal POS tag set without generating any additional text: {message}'''
    elif task_name == "Chunking":
        if strategy == "S1":
            formatted_system_prompt = f'''Generate the output only for the assistant. Output any {task_ling_ent} in the following sentence one per line without any additional text: {message}'''
        elif strategy == "S2":
            formatted_system_prompt = f'''Chunk the following sentence in CoNLL 2000 format with BIO tags without outputting any additional text: {message}'''
        elif strategy == "S3":
            formatted_system_prompt = f'''Chunk the following sentence in CoNLL 2000 format with BIO tags without outputting any additional text: {message}'''
    formatted_prompt = format_chat_prompt(message, chat_history, max_convo_length)
    print('Prompt + Context:')
    print(formatted_prompt)
    bot_message = chat(system_prompt=formatted_system_prompt,
                       user_prompt=formatted_prompt)
    chat_history.append((message, bot_message))
    return "", chat_history

def vicuna_strategies_respond(strategy, task_name, task_ling_ent, message, chat_history):
    formatted_prompt = ""
    if task_name == "POS Tagging":
        if strategy == "S1":
            formatted_prompt = f'''Generate the output only for the assistant. Output any {task_ling_ent} in the following sentence one per line without any additional text: {message}'''
        elif strategy == "S2":
            formatted_prompt = f'''POS tag the following sentence using Universal POS tag set without generating any additional text: {message}'''
        elif strategy == "S3":
            formatted_prompt = f'''POS tag the following sentence using Universal POS tag set without generating any additional text: {message}'''
    elif task_name == "Chunking":
        if strategy == "S1":
            formatted_prompt = f'''Generate the output only for the assistant. Output any {task_ling_ent} in the following sentence one per line without any additional text: {message}'''
        elif strategy == "S2":
            formatted_prompt = f'''Chunk the following sentence in CoNLL 2000 format with BIO tags without outputting any additional text: {message}'''
        elif strategy == "S3":
            formatted_prompt = f'''Chunk the following sentence in CoNLL 2000 format with BIO tags without outputting any additional text: {message}'''
    print('Vicuna Strategy Fn - Prompt + Context:')
    print(formatted_prompt)
    input_ids = vicuna_tokenizer.encode(formatted_prompt, return_tensors="pt")
    output_ids = vicuna_model.generate(input_ids, do_sample=True, max_length=1024, num_beams=5, no_repeat_ngram_size=2)
    bot_message = vicuna_tokenizer.decode(output_ids[0], skip_special_tokens=True)
    print(bot_message)
    # Remove the echoed prompt from bot_message
    bot_message = bot_message.replace(formatted_prompt, '')
    print(bot_message)
    chat_history.append((formatted_prompt, bot_message))
    time.sleep(2)
    return task_name, "", chat_history

def llama_strategies_respond(strategy, task_name, task_ling_ent, message, chat_history):
    formatted_prompt = ""
    if task_name == "POS Tagging":
        if strategy == "S1":
            formatted_prompt = f'''Generate the output only for the assistant. Output any {task_ling_ent} in the following sentence one per line without any additional text: {message}'''
        elif strategy == "S2":
            formatted_prompt = f'''POS tag the following sentence using Universal POS tag set without generating any additional text: {message}'''
        elif strategy == "S3":
            formatted_prompt = f'''POS tag the following sentence using Universal POS tag set without generating any additional text: {message}'''
    elif task_name == "Chunking":
        if strategy == "S1":
            formatted_prompt = f'''Generate the output only for the assistant. Output any {task_ling_ent} in the following sentence one per line without any additional text: {message}'''
        elif strategy == "S2":
            formatted_prompt = f'''Chunk the following sentence in CoNLL 2000 format with BIO tags without outputting any additional text: {message}'''
        elif strategy == "S3":
            formatted_prompt = f'''Chunk the following sentence in CoNLL 2000 format with BIO tags without outputting any additional text: {message}'''
    # print('Llama Strategies - Prompt + Context:')
    # print(formatted_prompt)
    input_ids = llama_tokenizer.encode(formatted_prompt, return_tensors="pt")
    output_ids = llama_model.generate(input_ids, do_sample=True, max_length=1024, num_beams=5, no_repeat_ngram_size=2)
    bot_message = llama_tokenizer.decode(output_ids[0], skip_special_tokens=True)
    # print(bot_message)
    # Remove the echoed prompt from bot_message
    bot_message = bot_message.replace(formatted_prompt, '')
    # print(bot_message)
    chat_history.append((formatted_prompt, bot_message))
    time.sleep(2)
    return task_name, "", chat_history

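# The three *_strategies_respond functions above build identical prompt strings.
# A hypothetical helper along these lines could consolidate that logic (a sketch
# only; it is not wired into the handlers below):
def build_strategy_prompt(strategy, task_name, task_ling_ent, message):
    '''Return the prompt for a (strategy, task) pair, mirroring the branches above.'''
    if strategy == "S1":
        return f'''Generate the output only for the assistant. Output any {task_ling_ent} in the following sentence one per line without any additional text: {message}'''
    if task_name == "POS Tagging":
        return f'''POS tag the following sentence using Universal POS tag set without generating any additional text: {message}'''
    return f'''Chunk the following sentence in CoNLL 2000 format with BIO tags without outputting any additional text: {message}'''
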
def interface():
    with gr.Tab("Linguistic Entities"):
        gr.Markdown("""
        ## 📜 Step-By-Step Instructions
        - Enter a sentence for the three models to process (Vicuna-7B, LLaMA-7B, and GPT-3.5).
        - If you have an OpenAI API key, select 'Yes' in the dropdown; otherwise select 'No'.
        - If you selected 'Yes', enter your OpenAI API key ([link to your OpenAI keys](https://platform.openai.com/api-keys)).
        - If you selected 'No', leave the 'OpenAI Key' field blank and continue with the rest.
        - Select a task from the dropdown.
        - Select a linguistic entity from the dropdown.
        - Click 'Submit' to send your inputs to the models.
        - To enter a new prompt, scroll to the bottom and click 'Clear' to start again.
        ### ⏳ After you click 'Submit', the models will take a few seconds to process your inputs.
        ### 🤖 Then, the models will output the POS tags or chunks for your sentence using three different strategies, based on your selections!
        Note: If you get an 'Error' from the gpt-3.5 model, check the following:
        - Make sure you entered your key correctly, without any extra characters.
        - If you used a free-tier key, you may have exceeded its quota.
        """)
        # Inputs
        task_prompt = gr.Textbox(show_label=False, placeholder="Write a prompt and press enter")
        with gr.Row():
            have_key = gr.Dropdown(["Yes", "No"], label="Do you own an API Key?", scale=0.5)
            task_apikey_input = gr.Textbox(label="OpenAI Key", placeholder="Enter your OpenAI key here", type="password", visible=True)
            task = gr.Dropdown(["POS Tagging", "Chunking"], label="Task")
            task_linguistic_entities = gr.Dropdown(["Noun", "Determiner", "Noun phrase", "Verb phrase", "Dependent clause", "T-units"], label="Linguistic Entity For Strategy 1")
        task_btn = gr.Button(value="Submit")
        # Outputs
        gr.Markdown("### Strategy 1 - QA-Based Prompting")
        strategy1 = gr.Markdown("S1", visible=False)
        with gr.Row():
            vicuna_S1_chatbot = gr.Chatbot(label="vicuna-7b")
            llama_S1_chatbot = gr.Chatbot(label="llama-7b")
            gpt_S1_chatbot = gr.Chatbot(label="gpt-3.5")
        gr.Markdown("### Strategy 2 - Instruction-Based Prompting")
        strategy2 = gr.Markdown("S2", visible=False)
        with gr.Row():
            vicuna_S2_chatbot = gr.Chatbot(label="vicuna-7b")
            llama_S2_chatbot = gr.Chatbot(label="llama-7b")
            gpt_S2_chatbot = gr.Chatbot(label="gpt-3.5")
        gr.Markdown("### Strategy 3 - Structured Prompting")
        strategy3 = gr.Markdown("S3", visible=False)
        with gr.Row():
            vicuna_S3_chatbot = gr.Chatbot(label="vicuna-7b")
            llama_S3_chatbot = gr.Chatbot(label="llama-7b")
            gpt_S3_chatbot = gr.Chatbot(label="gpt-3.5")
        clear_all = gr.ClearButton(components=[task_prompt, task_apikey_input, have_key, task, task_linguistic_entities,
                                               vicuna_S1_chatbot, llama_S1_chatbot, gpt_S1_chatbot,
                                               vicuna_S2_chatbot, llama_S2_chatbot, gpt_S2_chatbot,
                                               vicuna_S3_chatbot, llama_S3_chatbot, gpt_S3_chatbot])
        # Event handler for the API key
        task_btn.click(update_api_key, inputs=task_apikey_input)
        # vicuna_strategies_respond(strategy, task_name, task_ling_ent, message, chat_history)
        # Event handlers for the Vicuna POS/chunking chatbots
        task_btn.click(vicuna_strategies_respond, inputs=[strategy1, task, task_linguistic_entities, task_prompt, vicuna_S1_chatbot],
                       outputs=[task, task_prompt, vicuna_S1_chatbot])
        task_btn.click(vicuna_strategies_respond, inputs=[strategy2, task, task_linguistic_entities, task_prompt, vicuna_S2_chatbot],
                       outputs=[task, task_prompt, vicuna_S2_chatbot])
        task_btn.click(vicuna_strategies_respond, inputs=[strategy3, task, task_linguistic_entities, task_prompt, vicuna_S3_chatbot],
                       outputs=[task, task_prompt, vicuna_S3_chatbot])
        # Event handlers for the LLaMA POS/chunking chatbots
        task_btn.click(llama_strategies_respond, inputs=[strategy1, task, task_linguistic_entities, task_prompt, llama_S1_chatbot],
                       outputs=[task, task_prompt, llama_S1_chatbot])
        task_btn.click(llama_strategies_respond, inputs=[strategy2, task, task_linguistic_entities, task_prompt, llama_S2_chatbot],
                       outputs=[task, task_prompt, llama_S2_chatbot])
        task_btn.click(llama_strategies_respond, inputs=[strategy3, task, task_linguistic_entities, task_prompt, llama_S3_chatbot],
                       outputs=[task, task_prompt, llama_S3_chatbot])
        # Event handlers for the GPT-3.5 POS/chunking chatbots; the user must submit
        # an API key before submitting the prompt
        # Will activate after getting API key
        # task_apikey_btn.click(update_api_key, inputs=ling_ents_apikey_input)
        task_btn.click(gpt_strategies_respond, inputs=[have_key, strategy1, task, task_linguistic_entities, task_prompt, gpt_S1_chatbot],
                       outputs=[task_prompt, gpt_S1_chatbot])
        task_btn.click(gpt_strategies_respond, inputs=[have_key, strategy2, task, task_linguistic_entities, task_prompt, gpt_S2_chatbot],
                       outputs=[task_prompt, gpt_S2_chatbot])
        task_btn.click(gpt_strategies_respond, inputs=[have_key, strategy3, task, task_linguistic_entities, task_prompt, gpt_S3_chatbot],
                       outputs=[task_prompt, gpt_S3_chatbot])
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # Assessing the Articulate
    ## A Comparative Analysis of the Core Linguistic Knowledge in Large Language Models
    """)
    # load interface
    interface()

demo.launch()
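# launch() serves the app locally (default http://127.0.0.1:7860); on Hugging
# Face Spaces this same entry point is run automatically.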