Spaces · Runtime error
Commit b286b3f ("test") · 1 parent: a04a444

app.py CHANGED
@@ -7,112 +7,97 @@ model_name = "lmsys/vicuna-7b-v1.3"
Before (lines marked "-" were removed; a few long lines were truncated in the captured diff view and are left with "…"):

 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)

-
-# msg = template_all.format(text)
-template_all = "Output the <Noun, Verb, Adjective, Adverb, Preposition/Subord, Coordinating Conjunction, Cardinal Number, Determiner, Noun Phrase, Verb Phrase, Adjective Phrase, Adverb Phrase, Preposition Phrase, Conjunction Phrase, Coordinate Phrase, Quantitave Phrase, Complex Nominal, Clause, Dependent Clause, Fragment Clause, T-unit, Complex T-unit, Fragment T-unit> in the following sentence without additional text in json format: "
-# msg = template_single.format(ents_prompt[eid], text)
-# template_single = "Output any <{}> in the following sentence one per line without additional text: "

-
-
-
-
-

 with gr.Blocks() as demo:
     gr.Markdown("# LLM Evaluator With Linguistic Scrutiny")

-    with gr.Tab("POS"):
-        gr.Markdown(" Description ")
-
-        prompt_POS = gr.Textbox(show_label=False, placeholder="Write a prompt and press enter")
-
-        gr.Markdown("Strategy 1 QA-Based Prompting")
-        with gr.Row():
-            vicuna_S1_chatbot_POS = gr.Chatbot(label="vicuna-7b")
-            llama_S1_chatbot_POS = gr.Chatbot(label="llama-7b")
-            gpt_S1_chatbot_POS = gr.Chatbot(label="gpt-3.5")
-        clear = gr.ClearButton([prompt_POS, vicuna_S1_chatbot_POS])
-        gr.Markdown("Strategy 2 Instruction-Based Prompting")
-        with gr.Row():
-            vicuna_S2_chatbot_POS = gr.Chatbot(label="vicuna-7b")
-            llama_S2_chatbot_POS = gr.Chatbot(label="llama-7b")
-            gpt_S2_chatbot_POS = gr.Chatbot(label="gpt-3.5")
-        clear = gr.ClearButton([prompt_POS, vicuna_S2_chatbot_POS])
-        gr.Markdown("Strategy 3 Structured Prompting")
-        with gr.Row():
-            vicuna_S3_chatbot_POS = gr.Chatbot(label="vicuna-7b")
-            llama_S3_chatbot_POS = gr.Chatbot(label="llama-7b")
-            gpt_S3_chatbot_POS = gr.Chatbot(label="gpt-3.5")
-        clear = gr.ClearButton([prompt_POS, vicuna_S3_chatbot_POS])

-
-        gr.Markdown(" Description ")

-

-
-        with gr.Row():
-            vicuna_S1_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
-            llama_S1_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
-            gpt_S1_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
-        clear = gr.ClearButton([prompt_CHUNK, vicuna_S1_chatbot_CHUNK])
-        gr.Markdown("Strategy 2 Instruction")
-        with gr.Row():
-            vicuna_S2_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
-            llama_S2_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
-            gpt_S2_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
-        clear = gr.ClearButton([prompt_CHUNK, vicuna_S2_chatbot_CHUNK])
-        gr.Markdown("Strategy 3 Structured Prompting")
-        with gr.Row():
-            vicuna_S3_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
-            llama_S3_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
-            gpt_S3_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
-        clear = gr.ClearButton([prompt_CHUNK, vicuna_S3_chatbot_CHUNK])

-
-
-
-
-
-
-
-

-    def strategy1(message, chat_history):
-        input_ids = tokenizer.encode(template_all + message, return_tensors="pt")
-        output_ids = model.generate(input_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2)
-        bot_message = tokenizer.decode(output_ids[0], skip_special_tokens=True)
-
-        chat_history.append((template_all + message, bot_message))
-        time.sleep(2)
-        return "", chat_history

-
-
-
-

-
-
-

-    def …
-        …
         output_ids = model.generate(input_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2)
         bot_message = tokenizer.decode(output_ids[0], skip_special_tokens=True)

-        chat_history.append((…
         time.sleep(2)
         return "", chat_history
-

-    prompt_POS.submit(…
-    prompt_POS.submit(…
-    prompt_POS.submit(…

-    prompt_CHUNK.submit(…
-    prompt_CHUNK.submit(…
-    prompt_CHUNK.submit(…

 demo.launch()
After (lines marked "+" were added):

 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name)

+template_single = '''Please output any <{}> in the following sentence one per line without any additional text: "{}"'''

+linguistic_entities = [
+    "Noun",
+    "Determiner",
+    "Noun phrase",
+    "Verb phrase",
+    "Dependent Clause",
+    "T-units"
+]

 with gr.Blocks() as demo:
     gr.Markdown("# LLM Evaluator With Linguistic Scrutiny")


+    gr.Markdown(" Description ")

+    # Dropdown for linguistic entities
+    entity_dropdown = gr.Dropdown(linguistic_entities, label="Select Linguistic Entity")

+    prompt_POS = gr.Textbox(show_label=False, placeholder="Write a prompt and press enter")

+    gr.Markdown("Strategy 1 QA-Based Prompting")
+    with gr.Row():
+        vicuna_S1_chatbot_POS = gr.Chatbot(label="vicuna-7b")
+        llama_S1_chatbot_POS = gr.Chatbot(label="llama-7b")
+        gpt_S1_chatbot_POS = gr.Chatbot(label="gpt-3.5")
+    clear = gr.ClearButton([prompt_POS, vicuna_S1_chatbot_POS])
+    gr.Markdown("Strategy 2 Instruction-Based Prompting")
+    with gr.Row():
+        vicuna_S2_chatbot_POS = gr.Chatbot(label="vicuna-7b")
+        llama_S2_chatbot_POS = gr.Chatbot(label="llama-7b")
+        gpt_S2_chatbot_POS = gr.Chatbot(label="gpt-3.5")
+    clear = gr.ClearButton([prompt_POS, vicuna_S2_chatbot_POS])
+    gr.Markdown("Strategy 3 Structured Prompting")
+    with gr.Row():
+        vicuna_S3_chatbot_POS = gr.Chatbot(label="vicuna-7b")
+        llama_S3_chatbot_POS = gr.Chatbot(label="llama-7b")
+        gpt_S3_chatbot_POS = gr.Chatbot(label="gpt-3.5")
+    clear = gr.ClearButton([prompt_POS, vicuna_S3_chatbot_POS])


+    gr.Markdown(" Description ")
+
+    prompt_CHUNK = gr.Textbox(show_label=False, placeholder="Write a prompt and press enter")
+
+    gr.Markdown("Strategy 1 QA")
+    with gr.Row():
+        vicuna_S1_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
+        llama_S1_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
+        gpt_S1_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
+    clear = gr.ClearButton([prompt_CHUNK, vicuna_S1_chatbot_CHUNK])
+    gr.Markdown("Strategy 2 Instruction")
+    with gr.Row():
+        vicuna_S2_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
+        llama_S2_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
+        gpt_S2_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
+    clear = gr.ClearButton([prompt_CHUNK, vicuna_S2_chatbot_CHUNK])
+    gr.Markdown("Strategy 3 Structured Prompting")
+    with gr.Row():
+        vicuna_S3_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
+        llama_S3_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
+        gpt_S3_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
+    clear = gr.ClearButton([prompt_CHUNK, vicuna_S3_chatbot_CHUNK])
+
+    # def respond(message, chat_history):
+    #     input_ids = tokenizer.encode(message, return_tensors="pt")
+    #     output_ids = model.generate(input_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2)
+    #     bot_message = tokenizer.decode(output_ids[0], skip_special_tokens=True)

+    #     chat_history.append((message, bot_message))
+    #     time.sleep(2)
+    #     return "", chat_history

+    def respond_entities(message, chat_history):
+        entity = entity_dropdown.value
+        prompt = template_single.format(entity, message)
+        input_ids = tokenizer.encode(prompt, return_tensors="pt")
         output_ids = model.generate(input_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2)
         bot_message = tokenizer.decode(output_ids[0], skip_special_tokens=True)

+        chat_history.append((message, bot_message))
         time.sleep(2)
         return "", chat_history

+    prompt_POS.submit(respond_entities, [prompt_POS, vicuna_S1_chatbot_POS], [prompt_POS, vicuna_S1_chatbot_POS])
+    prompt_POS.submit(respond_entities, [prompt_POS, vicuna_S2_chatbot_POS], [prompt_POS, vicuna_S2_chatbot_POS])
+    prompt_POS.submit(respond_entities, [prompt_POS, vicuna_S3_chatbot_POS], [prompt_POS, vicuna_S3_chatbot_POS])

+    prompt_CHUNK.submit(respond_entities, [prompt_CHUNK, vicuna_S1_chatbot_CHUNK], [prompt_CHUNK, vicuna_S1_chatbot_CHUNK])
+    prompt_CHUNK.submit(respond_entities, [prompt_CHUNK, vicuna_S2_chatbot_CHUNK], [prompt_CHUNK, vicuna_S2_chatbot_CHUNK])
+    prompt_CHUNK.submit(respond_entities, [prompt_CHUNK, vicuna_S3_chatbot_CHUNK], [prompt_CHUNK, vicuna_S3_chatbot_CHUNK])

 demo.launch()
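A caveat worth flagging on the new callback. In Gradio, reading `entity_dropdown.value` inside a handler returns the component's initial value (here `None`, since no default is set), not the user's live selection; a component's current value only reaches a handler when the component is listed in the event's `inputs`. Also, `max_length=50` in `model.generate` counts the prompt tokens too, and the formatted instruction prompt alone will typically exceed 50 tokens, so generation would be cut off almost immediately; `max_new_tokens` bounds only the continuation. The following is a minimal sketch of that rewiring, not the committed code: it reuses the component and template names from the diff, swaps in `max_new_tokens`, and shows only the first POS chatbot (the other `.submit` calls would change the same way).

def respond_entities(message, entity, chat_history):
    # `entity` arrives through the inputs list below, so it reflects
    # the dropdown selection at submit time rather than its default.
    prompt = template_single.format(entity, message)
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    # max_new_tokens caps only the generated continuation, so the long
    # instruction prompt no longer consumes the whole length budget.
    output_ids = model.generate(input_ids, max_new_tokens=50,
                                num_beams=5, no_repeat_ngram_size=2)
    bot_message = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    chat_history.append((message, bot_message))
    return "", chat_history

# Listing entity_dropdown as an input makes Gradio pass its live value.
prompt_POS.submit(respond_entities,
                  [prompt_POS, entity_dropdown, vicuna_S1_chatbot_POS],
                  [prompt_POS, vicuna_S1_chatbot_POS])

For reference, `template_single.format("Noun", "The cat sat.")` expands to: Please output any <Noun> in the following sentence one per line without any additional text: "The cat sat." One further wrinkle, left untouched above: with a decoder-only model such as Vicuna, `tokenizer.decode(output_ids[0], ...)` echoes the prompt, so `bot_message` begins with the instruction text; slicing off the first `input_ids.shape[1]` tokens before decoding is the usual remedy.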