# --- Scrape artifact: Hugging Face Spaces file-viewer chrome ---
# Space status: "Runtime error"; file size: 4,656 bytes.
# Commit hashes: b308128 937be2f 7f877a9 738a5f6 8eb0f9a 7eaa7b0 04fc021
#   49c7ae8 35e0ec8 edb0bcd a450a5f c597e04 a04a444 b286b3f 5e8be56 8c245db
# (Line-number gutter 1-110 removed — not part of the program.)
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import time
import openai

# NOTE(review): this is a placeholder literal, not a real key — it should be
# read from an environment variable.  The per-tab "Open AI Key" textbox below
# suggests users are meant to supply their own key; confirm and wire that in.
openai.api_key = "OPENAI_API_KEY"

# Load the Vicuna 7B v1.3 LMSys model and tokenizer (downloads weights on
# first run; this happens at import time, before the UI is built).
model_name = "lmsys/vicuna-7b-v1.3"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Prompt template: first {} is the grammatical category (e.g. "Noun"),
# second {} is the sentence to analyze.  Not yet used by respond()/gpt3().
template_single = '''Please output any <{}> in the following sentence one per line without any additional text: "{}"'''
# Linguistic categories evaluated, one UI tab each:
#   Noun, Determiner, Noun phrase, Verb phrase, Dependent Clause, T-units
def interface():
    """Build one evaluation tab's UI inside the current ``gr.Blocks()``.

    Lays out a prompt textbox, an OpenAI-key textbox, and three
    prompting-strategy sections (QA-based, instruction-based, structured),
    each showing vicuna/llama/gpt chatbots side by side, and wires the
    prompt box to the vicuna chatbots.  Returns None.
    """
    gr.Markdown(" Description ")
    prompt_POS = gr.Textbox(show_label=False, placeholder="Write a prompt and press enter")
    # Collected but not yet forwarded to any OpenAI call — TODO wire into gpt3().
    openai_key = gr.Textbox(label="Open AI Key", placeholder="Enter your Openai key here", type="password")

    gr.Markdown("Strategy 1 QA-Based Prompting")
    with gr.Row():
        vicuna_S1_chatbot_POS = gr.Chatbot(label="vicuna-7b")
        llama_S1_chatbot_POS = gr.Chatbot(label="llama-7b")
        gpt_S1_chatbot_POS = gr.Chatbot(label="gpt-3.5")
    # Bug fix: previously cleared only the vicuna chatbot; clear the whole row.
    gr.ClearButton([prompt_POS, vicuna_S1_chatbot_POS, llama_S1_chatbot_POS, gpt_S1_chatbot_POS])

    gr.Markdown("Strategy 2 Instruction-Based Prompting")
    with gr.Row():
        vicuna_S2_chatbot_POS = gr.Chatbot(label="vicuna-7b")
        llama_S2_chatbot_POS = gr.Chatbot(label="llama-7b")
        gpt_S2_chatbot_POS = gr.Chatbot(label="gpt-3.5")
    gr.ClearButton([prompt_POS, vicuna_S2_chatbot_POS, llama_S2_chatbot_POS, gpt_S2_chatbot_POS])

    gr.Markdown("Strategy 3 Structured Prompting")
    with gr.Row():
        vicuna_S3_chatbot_POS = gr.Chatbot(label="vicuna-7b")
        llama_S3_chatbot_POS = gr.Chatbot(label="llama-7b")
        gpt_S3_chatbot_POS = gr.Chatbot(label="gpt-3.5")
    gr.ClearButton([prompt_POS, vicuna_S3_chatbot_POS, llama_S3_chatbot_POS, gpt_S3_chatbot_POS])

    # Bug fix: `respond` is defined later in the file, so referencing it
    # directly here raised NameError when interface() ran during UI build
    # (the Space's "Runtime error").  The lambda defers the name lookup
    # until a user actually submits, by which time respond() exists.
    # Only the vicuna chatbots are wired, as before; llama/gpt are static.
    for bot in (vicuna_S1_chatbot_POS, vicuna_S2_chatbot_POS, vicuna_S3_chatbot_POS):
        prompt_POS.submit(
            lambda message, history: respond(message, history),
            [prompt_POS, bot],
            [prompt_POS, bot],
        )
# Top-level UI: one tab per linguistic category.  NOTE(review): interface()
# passes `respond` to .submit(), but `respond` is defined further down the
# file — evaluating these tabs raises NameError at import time (this is the
# likely cause of the Space's "Runtime error"); the function definitions
# should precede this block.
with gr.Blocks() as demo:
    gr.Markdown("# LLM Evaluator With Linguistic Scrutiny")
    with gr.Tab("Noun"):
        interface()
    # The Determiner tab duplicates interface() inline with _CHUNK-suffixed
    # component names; its submit wiring happens further down the file.
    with gr.Tab("Determiner"):
        gr.Markdown(" Description ")
        prompt_CHUNK = gr.Textbox(show_label=False, placeholder="Write a prompt and press enter")
        gr.Markdown("Strategy 1 QA")
        with gr.Row():
            vicuna_S1_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
            llama_S1_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
            gpt_S1_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
        # Clears only the prompt and the vicuna chatbot; llama/gpt keep
        # their contents (same pattern in the two rows below).
        clear = gr.ClearButton([prompt_CHUNK, vicuna_S1_chatbot_CHUNK])
        gr.Markdown("Strategy 2 Instruction")
        with gr.Row():
            vicuna_S2_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
            llama_S2_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
            gpt_S2_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
        clear = gr.ClearButton([prompt_CHUNK, vicuna_S2_chatbot_CHUNK])
        gr.Markdown("Strategy 3 Structured Prompting")
        with gr.Row():
            vicuna_S3_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
            llama_S3_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
            gpt_S3_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
        clear = gr.ClearButton([prompt_CHUNK, vicuna_S3_chatbot_CHUNK])
    with gr.Tab("Noun phrase"):
        interface()
    with gr.Tab("Verb phrase"):
        interface()
    with gr.Tab("Dependent clause"):
        interface()
    with gr.Tab("T-units"):
        interface()
def gpt3(prompt):
    """Send *prompt* as a single user message to the OpenAI chat API and
    return the assistant's reply text.

    Propagates any openai error (auth, network, rate limit) to the caller.
    Currently unused by the UI wiring — the gpt-3.5 chatbots are static.
    """
    response = openai.ChatCompletion.create(
        # Bug fix: 'gpt3.5' is not a valid OpenAI model id; the chat
        # completions endpoint expects 'gpt-3.5-turbo'.
        model='gpt-3.5-turbo',
        messages=[{"role": "user", "content": prompt}],
    )
    return response['choices'][0]['message']['content']
def respond(message, chat_history):
    """Generate a Vicuna reply to *message* and append the exchange.

    Gradio submit handler: returns ("", updated_history) so the prompt box
    is cleared while the chatbot shows the new (user, bot) pair.  Uses the
    module-level ``tokenizer`` and ``model``.
    """
    encoded = tokenizer.encode(message, return_tensors="pt")
    generated = model.generate(
        encoded,
        max_length=50,
        num_beams=5,
        no_repeat_ngram_size=2,
    )
    reply = tokenizer.decode(generated[0], skip_special_tokens=True)
    chat_history.append((message, reply))
    time.sleep(2)
    return "", chat_history
# Wire the Determiner tab's prompt box: each submit feeds only the vicuna
# chatbots (the llama/gpt chatbots are never updated).  NOTE(review): these
# .submit() registrations reference components created inside the Blocks
# context above; in current Gradio, event wiring normally lives inside that
# `with gr.Blocks()` block — confirm against the targeted gradio version.
prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S1_chatbot_CHUNK], [prompt_CHUNK, vicuna_S1_chatbot_CHUNK])
prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S2_chatbot_CHUNK], [prompt_CHUNK, vicuna_S2_chatbot_CHUNK])
prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S3_chatbot_CHUNK], [prompt_CHUNK, vicuna_S3_chatbot_CHUNK])

# Start the Gradio server.
demo.launch()
# (end of scraped file — trailing viewer-gutter character removed)