Spaces:
Runtime error
Runtime error
File size: 7,957 Bytes
b308128 937be2f cd41e3a cd1760d 7f877a9 2baca0d 738a5f6 94f53fc 224700e 94f53fc 21090d3 04fc021 738a5f6 cd1760d 66d9704 cd1760d 66d9704 1941971 6204d1b 1941971 8c76c4e b2e68d1 2baca0d b2e68d1 2baca0d 94f53fc c15f723 d067fae c15f723 3c712c1 66d9704 d067fae 971271a 2b07352 3036c83 230d9ba 3036c83 e56b70d 3036c83 94f53fc 3036c83 94f53fc 66d9704 d067fae 971271a 2b07352 ee8b9cc a2797f8 94f53fc cfed1ed 2b07352 94f53fc 68440cc 94f53fc 02f452d 3c712c1 5cdbb3f 738a5f6 49c7ae8 076d731 1941971 c15f723 8c76c4e c15f723 8cbc513 42ca545 75c4a83 738a5f6 66d9704 738a5f6 076d731 94f53fc e56b70d 94f53fc e56b70d 738a5f6 66d9704 cd1760d 66d9704 2b06a1b adf1293 8c76c4e 35e0ec8 edb0bcd 94f53fc 35e0ec8 8c245db |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 |
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import lftk
import spacy
import time
import os
import openai
# Load the Vicuna 7B model and tokenizer
# NOTE(review): both 7B models are loaded eagerly at import time on whatever
# device transformers defaults to (CPU unless configured) — this is slow and
# memory-heavy; confirm the Space has enough RAM for two 7B checkpoints.
vicuna_tokenizer = AutoTokenizer.from_pretrained("lmsys/vicuna-7b-v1.3")
vicuna_model = AutoModelForCausalLM.from_pretrained("lmsys/vicuna-7b-v1.3")
# Load the LLaMA 7b model and tokenizer
llama_tokenizer = AutoTokenizer.from_pretrained("daryl149/llama-2-7b-chat-hf")
llama_model = AutoModelForCausalLM.from_pretrained("daryl149/llama-2-7b-chat-hf")
# Prompt template with two slots: the linguistic entity name and the sentence
# to analyze. Currently unused by the respond functions below, which inline
# their own copies of this wording.
template_single = '''Please output any <{}> in the following sentence one per line without any additional text: "{}"'''
def linguistic_features(message):
    """Extract handcrafted word/sentence-level linguistic features from *message*.

    Runs the text through spaCy's ``en_core_web_sm`` pipeline, then uses LFTK
    to extract every feature in the "wordsent" family.

    Args:
        message: The sentence (or short text) to analyze.

    Returns:
        The LFTK extraction result for the "wordsent" feature family
        (a mapping of feature keys to values, printed for debugging).
    """
    # spacy.load reads the whole model from disk -- cache the pipeline on the
    # function object so repeated button clicks do not reload it every time.
    nlp = getattr(linguistic_features, "_nlp", None)
    if nlp is None:
        nlp = spacy.load("en_core_web_sm")
        linguistic_features._nlp = nlp
    # Create a spaCy doc object
    doc = nlp(message)
    # Initiate LFTK extractor by passing in the doc
    LFTK_extractor = lftk.Extractor(docs=doc)
    # Customize LFTK extractor (optional)
    LFTK_extractor.customize(stop_words=True, punctuations=False, round_decimal=3)
    # Dynamically look up all word/sentence-family feature keys, then extract them.
    features_to_extract = lftk.search_features(family="wordsent", language="general", return_format="list_key")
    extracted_features = LFTK_extractor.extract(features=features_to_extract)
    print('Linguistic Features:', extracted_features)
    return extracted_features
def update_api_key(new_key):
    """Store the user-supplied OpenAI API key for subsequent ``chat`` calls.

    Sets both the ``OPENAI_API_TOKEN`` environment variable and
    ``openai.api_key``. The original declared ``global api_key`` for a
    variable that was never defined or assigned anywhere -- removed.

    Args:
        new_key: The API key entered in the UI; ignored when empty so an
            accidental blank submission does not clobber a working key.
    """
    if not new_key:
        return
    os.environ['OPENAI_API_TOKEN'] = new_key
    openai.api_key = new_key
def chat(system_prompt, user_prompt, model = 'gpt-3.5-turbo', temperature = 0, verbose = False):
    """Send one system+user message pair to the OpenAI chat API.

    Args:
        system_prompt: Content for the "system" role message.
        user_prompt: Content for the "user" role message.
        model: OpenAI model identifier.
        temperature: Sampling temperature (0 = deterministic-ish).
        verbose: When True, echo both prompts and the response to stdout.

    Returns:
        The assistant's reply text (first choice only).
    """
    conversation = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]
    completion = openai.ChatCompletion.create(
        model=model,
        temperature=temperature,
        messages=conversation,
    )
    answer = completion['choices'][0]['message']['content']
    if verbose:
        print('System prompt:', system_prompt)
        print('User prompt:', user_prompt)
        print('GPT response:', answer)
    return answer
def format_chat_prompt(message, chat_history, max_convo_length):
    """Render recent chat turns plus the new message as a User/Assistant transcript.

    Args:
        message: The new user message to append at the end.
        chat_history: List of ``(user_message, bot_message)`` tuples.
        max_convo_length: Keep only this many most-recent turns, so the
            prompt stays bounded.

    Returns:
        The transcript string, ending with a dangling "Assistant:" cue for
        the model to complete.
    """
    pieces = [
        f"\nUser: {user_turn}\nAssistant: {bot_turn}"
        for user_turn, bot_turn in chat_history[-max_convo_length:]
    ]
    pieces.append(f"\nUser: {message}\nAssistant:")
    return "".join(pieces)
def gpt_respond(tab_name, message, chat_history, max_convo_length = 10):
    """Answer *message* with GPT-3.5, using recent chat history as context.

    Args:
        tab_name: The linguistic entity to extract (interpolated into the
            system prompt).
        message: The new user message.
        chat_history: Mutable list of ``(user, bot)`` tuples; the new turn is
            appended in place.
        max_convo_length: Number of past turns included in the prompt.

    Returns:
        ``("", chat_history)`` -- clears the input textbox and refreshes the
        chatbot component.
    """
    context_prompt = format_chat_prompt(message, chat_history, max_convo_length)
    print('Prompt + Context:')
    print(context_prompt)
    system_prompt = f'''Generate the output only for the assistant. Please output any <{tab_name}> in the following sentence one per line without any additional text.'''
    bot_message = chat(system_prompt=system_prompt, user_prompt=context_prompt)
    chat_history.append((message, bot_message))
    return "", chat_history
def vicuna_respond(tab_name, message, chat_history, linguistic_features=None):
    """Ask the Vicuna-7B model to list every *tab_name* entity in *message*.

    Bug fix: ``linguistic_features`` was a required parameter, but the
    ``btn.click`` wiring in ``interface()`` supplies only three inputs, so
    every click raised ``TypeError``. It now defaults to ``None`` (the value
    is not used by this function).

    Args:
        tab_name: Linguistic entity to extract (e.g. "Noun").
        message: The sentence to analyze.
        chat_history: Mutable list of ``(prompt, response)`` tuples; the new
            turn is appended in place.
        linguistic_features: Unused; accepted for Gradio wiring compatibility.

    Returns:
        ``(tab_name, "", chat_history)`` -- keeps the dropdown value, clears
        the input textbox, and refreshes the chatbot component.
    """
    formatted_prompt = f'''Generate the output only for the assistant. Please output any {tab_name} in the following sentence one per line without any additional text: {message}'''
    print('Vicuna - Prompt + Context:')
    print(formatted_prompt)
    input_ids = vicuna_tokenizer.encode(formatted_prompt, return_tensors="pt")
    output_ids = vicuna_model.generate(input_ids, do_sample=True, max_length=1024, num_beams=5, no_repeat_ngram_size=2)
    bot_message = vicuna_tokenizer.decode(output_ids[0], skip_special_tokens=True)
    print(bot_message)
    # The decoded text echoes the prompt; strip it so only the answer remains.
    bot_message = bot_message.replace(formatted_prompt, '')
    print(bot_message)
    chat_history.append((formatted_prompt, bot_message))
    time.sleep(2)
    return tab_name, "", chat_history
def llama_respond(tab_name, message, chat_history, linguistic_features=None):
    """Ask the LLaMA-2-7B-chat model to list every *tab_name* entity in *message*.

    Bug fix: ``linguistic_features`` was a required parameter, but the
    ``btn.click`` wiring in ``interface()`` supplies only three inputs, so
    every click raised ``TypeError``. It now defaults to ``None`` (the value
    is not used by this function).

    Args:
        tab_name: Linguistic entity to extract (e.g. "Noun").
        message: The sentence to analyze.
        chat_history: Mutable list of ``(prompt, response)`` tuples; the new
            turn is appended in place.
        linguistic_features: Unused; accepted for Gradio wiring compatibility.

    Returns:
        ``(tab_name, "", chat_history)`` -- keeps the dropdown value, clears
        the input textbox, and refreshes the chatbot component.
    """
    formatted_prompt = f'''Generate the output only for the assistant. Please output any {tab_name} in the following sentence one per line without any additional text: {message}'''
    print('Llama - Prompt + Context:')
    print(formatted_prompt)
    input_ids = llama_tokenizer.encode(formatted_prompt, return_tensors="pt")
    output_ids = llama_model.generate(input_ids, do_sample=True, max_length=1024, num_beams=5, no_repeat_ngram_size=2)
    bot_message = llama_tokenizer.decode(output_ids[0], skip_special_tokens=True)
    # The decoded text echoes the prompt; strip it so only the answer remains.
    bot_message = bot_message.replace(formatted_prompt, '')
    print(bot_message)
    chat_history.append((formatted_prompt, bot_message))
    time.sleep(2)
    return tab_name, "", chat_history
def interface():
    """Build the Gradio UI: prompt input, API-key row, entity dropdown, and
    one chatbot per model (Vicuna, LLaMA, GPT-3.5).

    Fixes relative to the original:
      * ``gr.Textbox(disabled=True)`` is not a valid argument and raised at
        build time -- ``interactive=False`` is the supported way to make a
        textbox read-only.
      * The features ``btn.click`` lambda called ``linguistic_features`` on
        the Textbox *component object* instead of the submitted value; the
        handler now takes the value Gradio passes from ``inputs``.
      * The respond functions take four parameters, but only three inputs
        were wired, so clicks raised ``TypeError``; the features textbox is
        now passed as the fourth input.
    """
    gr.Markdown(" Description ")
    textbox_prompt = gr.Textbox(show_label=False, placeholder="Write a prompt and press enter")
    with gr.Row():
        api_key_input = gr.Textbox(label="Open AI Key", placeholder="Enter your Openai key here", type="password")
        api_key_btn = gr.Button(value="Submit Key", scale=0)
    tab_name = gr.Dropdown(["Noun", "Determiner", "Noun phrase", "Verb phrase", "Dependent clause", "T-units"], label="Linguistic Entity")
    btn = gr.Button(value="Submit")
    gr.Markdown("Strategy 1 QA-Based Prompting")
    linguistic_features_textbox = gr.Textbox(label="Linguistic Features", interactive=False)
    with gr.Row():
        vicuna_S1_chatbot = gr.Chatbot(label="vicuna-7b")
        llama_S1_chatbot = gr.Chatbot(label="llama-7b")
        gpt_S1_chatbot = gr.Chatbot(label="gpt-3.5")
    clear = gr.ClearButton(components=[textbox_prompt, api_key_input, vicuna_S1_chatbot, llama_S1_chatbot, gpt_S1_chatbot])
    # The handler receives the textbox VALUE (a str) from `inputs`; return the
    # extracted features as text for the read-only output box.
    btn.click(lambda message: str(linguistic_features(message)),
              inputs=[textbox_prompt],
              outputs=[linguistic_features_textbox])
    # Pass the features textbox as the fourth input so the click handlers
    # match the four-parameter signatures of the respond functions.
    btn.click(vicuna_respond,
              inputs=[tab_name, textbox_prompt, vicuna_S1_chatbot, linguistic_features_textbox],
              outputs=[tab_name, textbox_prompt, vicuna_S1_chatbot])
    btn.click(llama_respond,
              inputs=[tab_name, textbox_prompt, llama_S1_chatbot, linguistic_features_textbox],
              outputs=[tab_name, textbox_prompt, llama_S1_chatbot])
    # TODO(review): GPT wiring is still disabled -- enable once key handling
    # is confirmed to work in the deployed Space.
    #api_key_btn.click(update_api_key, inputs=api_key_input)
    #btn.click(gpt_respond, inputs=[tab_name, textbox_prompt, gpt_S1_chatbot], outputs=[tab_name, textbox_prompt, gpt_S1_chatbot])
# Assemble the full app inside a Blocks context and start the server.
# NOTE(review): this launches at import time (no __main__ guard) -- that is
# the convention for Hugging Face Spaces, but it means importing this module
# starts the server.
with gr.Blocks() as demo:
    gr.Markdown("# LLM Evaluator With Linguistic Scrutiny")
    interface()
demo.launch()
|