Spaces:
Runtime error
Runtime error
File size: 4,584 Bytes
b308128 937be2f 7f877a9 b308128 8eb0f9a 7eaa7b0 937be2f 04fc021 bbe45f1 49c7ae8 b286b3f 49c7ae8 35e0ec8 edb0bcd 35e0ec8 b286b3f a450a5f b286b3f 85bd1c9 b286b3f 8eb0f9a b286b3f a04a444 2a20876 b286b3f 2a20876 b286b3f 2a20876 b286b3f a04a444 b286b3f a04a444 b286b3f a04a444 b286b3f a04a444 2a20876 ac4f141 b286b3f a450a5f 2a20876 5e8be56 8c245db |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 |
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import time
# Load the Vicuna 7B v1.3 LMSys model and tokenizer
# NOTE(review): this downloads and loads a 7B-parameter causal LM at import
# time — presumably onto CPU in full precision, since no device_map/dtype is
# given; confirm the host has enough RAM for this.
model_name = "lmsys/vicuna-7b-v1.3"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Prompt template: first placeholder is the linguistic-entity name, second is
# the user's sentence. Used by respond_entities() below.
template_single = '''Output any <{}> in the following sentence one per line: "{}"'''

# Choices shown in the "Select Linguistic Entity" dropdown of the UI.
linguistic_entities = [
    "Noun",
    "Determiner",
    "Noun phrase",
    "Verb phrase",
    "Dependent Clause",
    "T-units"
]
with gr.Blocks() as demo:
    gr.Markdown("# LLM Evaluator With Linguistic Scrutiny")
    gr.Markdown(" Description ")

    # Dropdown for linguistic entities; passed to the callback as an event
    # input so the user's *current* selection is used (reading
    # entity_dropdown.value inside the callback only ever sees the
    # construction-time value, never the live choice).
    entity_dropdown = gr.Dropdown(linguistic_entities, label="Select Linguistic Entity")
    prompt_POS = gr.Textbox(show_label=False, placeholder="Write a prompt and press enter")

    gr.Markdown("Strategy 1 QA-Based Prompting")
    with gr.Row():
        vicuna_S1_chatbot_POS = gr.Chatbot(label="vicuna-7b")
        llama_S1_chatbot_POS = gr.Chatbot(label="llama-7b")
        gpt_S1_chatbot_POS = gr.Chatbot(label="gpt-3.5")
    clear_S1 = gr.ClearButton([prompt_POS, vicuna_S1_chatbot_POS])

    gr.Markdown("Strategy 2 Instruction-Based Prompting")
    with gr.Row():
        vicuna_S2_chatbot_POS = gr.Chatbot(label="vicuna-7b")
        llama_S2_chatbot_POS = gr.Chatbot(label="llama-7b")
        gpt_S2_chatbot_POS = gr.Chatbot(label="gpt-3.5")
    clear_S2 = gr.ClearButton([prompt_POS, vicuna_S2_chatbot_POS])

    gr.Markdown("Strategy 3 Structured Prompting")
    with gr.Row():
        vicuna_S3_chatbot_POS = gr.Chatbot(label="vicuna-7b")
        llama_S3_chatbot_POS = gr.Chatbot(label="llama-7b")
        gpt_S3_chatbot_POS = gr.Chatbot(label="gpt-3.5")
    clear_S3 = gr.ClearButton([prompt_POS, vicuna_S3_chatbot_POS])

    def respond_entities(entity, message, chat_history):
        """Run the entity-extraction prompt through Vicuna and update one chat.

        Args:
            entity: the linguistic entity selected in the dropdown (str or
                None if nothing is selected yet).
            message: the user's sentence from the textbox.
            chat_history: the target Chatbot's current (user, bot) pairs.

        Returns:
            ("", updated_history) — the empty string clears the textbox; the
            pair matches the two output components declared in .submit()
            below (the original returned a single value, which made Gradio
            raise at runtime because two outputs were declared).
        """
        prompt = template_single.format(entity, message)
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        # max_new_tokens (not max_length) so the generation budget is not
        # consumed by the prompt tokens themselves.
        output_ids = model.generate(input_ids, max_new_tokens=50, num_beams=5, no_repeat_ngram_size=2)
        # Decode only the newly generated tokens, skipping the echoed prompt.
        bot_message = tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True)
        chat_history.append((message, bot_message))
        return "", chat_history

    # One wiring per strategy row: inputs are (dropdown, textbox, chatbot
    # state); outputs are (textbox, chatbot) — exactly matching the two
    # values respond_entities returns.
    prompt_POS.submit(respond_entities, [entity_dropdown, prompt_POS, vicuna_S1_chatbot_POS], [prompt_POS, vicuna_S1_chatbot_POS])
    prompt_POS.submit(respond_entities, [entity_dropdown, prompt_POS, vicuna_S2_chatbot_POS], [prompt_POS, vicuna_S2_chatbot_POS])
    prompt_POS.submit(respond_entities, [entity_dropdown, prompt_POS, vicuna_S3_chatbot_POS], [prompt_POS, vicuna_S3_chatbot_POS])

demo.launch()
|