Spaces:
Runtime error
Runtime error
File size: 4,750 Bytes
b308128 937be2f 7f877a9 738a5f6 8eb0f9a 7eaa7b0 937be2f 04fc021 738a5f6 3c712c1 a8ee66f 3c712c1 a8ee66f 3c712c1 076d731 738a5f6 49c7ae8 076d731 3213aa6 076d731 75c4a83 738a5f6 076d731 47bb3d2 738a5f6 076d731 47bb3d2 738a5f6 076d731 47bb3d2 738a5f6 3213aa6 75c4a83 3213aa6 738a5f6 49c7ae8 35e0ec8 edb0bcd 738a5f6 076d731 35e0ec8 738a5f6 9b06190 a450a5f 9b06190 c597e04 9b06190 a04a444 738a5f6 076d731 738a5f6 076d731 738a5f6 076d731 738a5f6 076d731 a450a5f 9b06190 5e8be56 8c245db |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 |
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import time
import openai

# Load the Vicuna 7B v1.3 LMSys model and tokenizer.
# NOTE: this downloads/loads a 7B-parameter model at import time — the app
# takes a while to start and needs substantial RAM.
model_name = "lmsys/vicuna-7b-v1.3"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Prompt template: first slot is the linguistic unit (e.g. "Noun"), second is
# the user's sentence. Asks the model to list each matching unit on its own line.
template_single = '''Please output any <{}> in the following sentence one per line without any additional text: "{}"'''
def gpt3(prompt):
    """Send *prompt* to the OpenAI chat API and return the reply text.

    Expects ``openai.api_key`` to have been set by the caller; propagates
    any ``openai`` error (bad key, network failure) to the caller.
    """
    # BUG FIX: 'gpt3.5' is not a valid OpenAI model id — every request would
    # fail with a model-not-found error. The chat model is 'gpt-3.5-turbo'.
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
    )
    return response["choices"][0]["message"]["content"]
def respond(message, chat_history):
    """Generate a Vicuna reply for *message*, append the (user, bot) pair to
    *chat_history*, and return ("", updated_history) so the wired Gradio
    textbox is cleared and the chatbot refreshed.
    """
    encoded = tokenizer.encode(message, return_tensors="pt")
    generated = model.generate(
        encoded,
        max_length=50,
        num_beams=5,
        no_repeat_ngram_size=2,
    )
    reply = tokenizer.decode(generated[0], skip_special_tokens=True)
    chat_history.append((message, reply))
    # Brief pause before the UI update (kept from the original behavior).
    time.sleep(2)
    return "", chat_history
def interface(tab_name):
    """Build one linguistic-category tab: a prompt box, an OpenAI-key box, and
    three strategy sections each holding (vicuna / llama / gpt-3.5) chatbots,
    with submit handlers wired up.

    Must be called inside a ``gr.Blocks`` context. *tab_name* is the
    linguistic unit (e.g. "Noun") interpolated into ``template_single``.
    """
    gr.Markdown(" Description ")
    textbox_prompt = gr.Textbox(show_label=False, placeholder="Write a prompt and press enter")
    api_key = gr.Textbox(label="Open AI Key", placeholder="Enter your Openai key here", type="password")

    # BUG FIX: the original assigned the Textbox *component* to
    # openai.api_key and formatted the template with component objects at
    # build time (the result was never used). Component values only exist at
    # event time, so key/template handling lives in this handler instead.
    def gpt_respond(key, message, chat_history):
        # Read the key and prompt *values* submitted by the user.
        openai.api_key = key
        bot_message = gpt3(template_single.format(tab_name, message))
        chat_history.append((message, bot_message))
        return "", chat_history

    gr.Markdown("Strategy 1 QA-Based Prompting")
    with gr.Row():
        vicuna_S1_chatbot = gr.Chatbot(label="vicuna-7b")
        llama_S1_chatbot = gr.Chatbot(label="llama-7b")
        gpt_S1_chatbot = gr.Chatbot(label="gpt-3.5")
    clear = gr.ClearButton([textbox_prompt, vicuna_S1_chatbot])

    gr.Markdown("Strategy 2 Instruction-Based Prompting")
    with gr.Row():
        vicuna_S2_chatbot = gr.Chatbot(label="vicuna-7b")
        llama_S2_chatbot = gr.Chatbot(label="llama-7b")
        gpt_S2_chatbot = gr.Chatbot(label="gpt-3.5")
    clear = gr.ClearButton([textbox_prompt, vicuna_S2_chatbot])

    gr.Markdown("Strategy 3 Structured Prompting")
    with gr.Row():
        vicuna_S3_chatbot = gr.Chatbot(label="vicuna-7b")
        llama_S3_chatbot = gr.Chatbot(label="llama-7b")
        gpt_S3_chatbot = gr.Chatbot(label="gpt-3.5")
    clear = gr.ClearButton([textbox_prompt, vicuna_S3_chatbot])

    # One submission updates the Vicuna chatbot of every strategy section.
    textbox_prompt.submit(respond, [textbox_prompt, vicuna_S1_chatbot], [textbox_prompt, vicuna_S1_chatbot])
    textbox_prompt.submit(respond, [textbox_prompt, vicuna_S2_chatbot], [textbox_prompt, vicuna_S2_chatbot])
    textbox_prompt.submit(respond, [textbox_prompt, vicuna_S3_chatbot], [textbox_prompt, vicuna_S3_chatbot])

    # BUG FIX: the original registered gpt3 (a one-argument function) as a
    # handler with two inputs, which would raise at event time. Route through
    # gpt_respond, which takes (key, message, history).
    api_key.submit(gpt_respond, [api_key, textbox_prompt, gpt_S1_chatbot], [textbox_prompt, gpt_S1_chatbot])
# Top-level UI: one tab per linguistic unit, then launch the app.
with gr.Blocks() as demo:
    gr.Markdown("# LLM Evaluator With Linguistic Scrutiny")
    with gr.Tab("Noun"):
        interface("Noun")
    # NOTE(review): this tab duplicates interface() by hand and, unlike the
    # other tabs, wires no GPT handler and no api-key box — presumably
    # work-in-progress; consider replacing with interface("Determiner").
    with gr.Tab("Determiner"):
        gr.Markdown(" Description ")
        prompt_CHUNK = gr.Textbox(show_label=False, placeholder="Write a prompt and press enter")
        gr.Markdown("Strategy 1 QA")
        with gr.Row():
            vicuna_S1_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
            llama_S1_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
            gpt_S1_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
        clear = gr.ClearButton([prompt_CHUNK, vicuna_S1_chatbot_CHUNK])
        gr.Markdown("Strategy 2 Instruction")
        with gr.Row():
            vicuna_S2_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
            llama_S2_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
            gpt_S2_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
        clear = gr.ClearButton([prompt_CHUNK, vicuna_S2_chatbot_CHUNK])
        gr.Markdown("Strategy 3 Structured Prompting")
        with gr.Row():
            vicuna_S3_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
            llama_S3_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
            gpt_S3_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
        clear = gr.ClearButton([prompt_CHUNK, vicuna_S3_chatbot_CHUNK])
    with gr.Tab("Noun phrase"):
        interface("Noun phrase")
    with gr.Tab("Verb phrase"):
        interface("Verb phrase")
    with gr.Tab("Dependent clause"):
        interface("Dependent clause")
    with gr.Tab("T-units"):
        interface("T-units")
    # Wiring for the hand-built Determiner tab. prompt_CHUNK was created
    # inside the Tab context above; the Python name remains in scope here.
    # Each submission updates one strategy's Vicuna chatbot only.
    prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S1_chatbot_CHUNK], [prompt_CHUNK, vicuna_S1_chatbot_CHUNK])
    prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S2_chatbot_CHUNK], [prompt_CHUNK, vicuna_S2_chatbot_CHUNK])
    prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S3_chatbot_CHUNK], [prompt_CHUNK, vicuna_S3_chatbot_CHUNK])
demo.launch()
|