research14 committed on
Commit
49c7ae8
·
1 Parent(s): 35e0ec8
Files changed (1) hide show
  1. app.py +18 -6
app.py CHANGED
@@ -7,6 +7,18 @@ model_name = "lmsys/vicuna-7b-v1.3"
7
  tokenizer = AutoTokenizer.from_pretrained(model_name)
8
  model = AutoModelForCausalLM.from_pretrained(model_name)
9
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  with gr.Blocks() as demo:
11
  gr.Markdown("# LLM Evaluator With Linguistic Scrutiny")
12
 
@@ -15,13 +27,13 @@ with gr.Blocks() as demo:
15
 
16
  prompt_POS = gr.Textbox(show_label=False, placeholder="Write a prompt and press enter")
17
 
18
- gr.Markdown("Strategy 1 QA")
19
  with gr.Row():
20
  vicuna_S1_chatbot_POS = gr.Chatbot(label="vicuna-7b")
21
  llama_S1_chatbot_POS = gr.Chatbot(label="llama-7b")
22
  gpt_S1_chatbot_POS = gr.Chatbot(label="gpt-3.5")
23
  clear = gr.ClearButton([prompt_POS, vicuna_S1_chatbot_POS])
24
- gr.Markdown("Strategy 2 Instruction")
25
  with gr.Row():
26
  vicuna_S2_chatbot_POS = gr.Chatbot(label="vicuna-7b")
27
  llama_S2_chatbot_POS = gr.Chatbot(label="llama-7b")
@@ -67,12 +79,12 @@ with gr.Blocks() as demo:
67
  time.sleep(2)
68
  return "", chat_history
69
 
70
- prompt_POS.submit(respond, [prompt_POS, vicuna_S1_chatbot_POS], [prompt_POS, vicuna_S1_chatbot_POS])
71
- prompt_POS.submit(respond, [prompt_POS, vicuna_S2_chatbot_POS], [prompt_POS, vicuna_S2_chatbot_POS])
72
  prompt_POS.submit(respond, [prompt_POS, vicuna_S3_chatbot_POS], [prompt_POS, vicuna_S3_chatbot_POS])
73
 
74
- prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S1_chatbot_CHUNK], [prompt_CHUNK, vicuna_S1_chatbot_CHUNK])
75
- prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S2_chatbot_CHUNK], [prompt_CHUNK, vicuna_S2_chatbot_CHUNK])
76
  prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S3_chatbot_CHUNK], [prompt_CHUNK, vicuna_S3_chatbot_CHUNK])
77
 
78
  demo.launch()
 
7
  tokenizer = AutoTokenizer.from_pretrained(model_name)
8
  model = AutoModelForCausalLM.from_pretrained(model_name)
9
 
10
+ ## Task 1
11
+ # msg = template_all.format(text)
12
+ template_all = '''Output the <Noun, Verb, Adjective, Adverb, Preposition/Subord, Coordinating Conjunction, Cardinal Number, Determiner, Noun Phrase, Verb Phrase, Adjective Phrase, Adverb Phrase, Preposition Phrase, Conjunction Phrase, Coordinate Phrase, Quantitative Phrase, Complex Nominal, Clause, Dependent Clause, Fragment Clause, T-unit, Complex T-unit, Fragment T-unit> in the following sentence without additional text in json format: "{}"'''
13
+ # msg = template_single.format(ents_prompt[eid], text)
14
+ template_single = '''Output any <{}> in the following sentence one per line without additional text: "{}"'''
15
+
16
+ ## Task 2
17
+ prompt2_pos = '''POS tag the following sentence using Universal POS tag set without generating additional text: {}'''
18
+ prompt2_chunk = '''Do sentence chunking for the following sentence as in CoNLL 2000 shared task without generating additional text: {}'''
19
+
20
+ ## Task 3
21
+
22
  with gr.Blocks() as demo:
23
  gr.Markdown("# LLM Evaluator With Linguistic Scrutiny")
24
 
 
27
 
28
  prompt_POS = gr.Textbox(show_label=False, placeholder="Write a prompt and press enter")
29
 
30
+ gr.Markdown("Strategy 1 QA-Based Prompting")
31
  with gr.Row():
32
  vicuna_S1_chatbot_POS = gr.Chatbot(label="vicuna-7b")
33
  llama_S1_chatbot_POS = gr.Chatbot(label="llama-7b")
34
  gpt_S1_chatbot_POS = gr.Chatbot(label="gpt-3.5")
35
  clear = gr.ClearButton([prompt_POS, vicuna_S1_chatbot_POS])
36
+ gr.Markdown("Strategy 2 Instruction-Based Prompting")
37
  with gr.Row():
38
  vicuna_S2_chatbot_POS = gr.Chatbot(label="vicuna-7b")
39
  llama_S2_chatbot_POS = gr.Chatbot(label="llama-7b")
 
79
  time.sleep(2)
80
  return "", chat_history
81
 
82
+ prompt_POS.submit(respond, [template_all.format(prompt_POS), vicuna_S1_chatbot_POS], [template_all.format(prompt_POS), vicuna_S1_chatbot_POS])
83
+ prompt_POS.submit(respond, [prompt2_pos.format(prompt_POS), vicuna_S2_chatbot_POS], [prompt2_pos.format(prompt_POS), vicuna_S2_chatbot_POS])
84
  prompt_POS.submit(respond, [prompt_POS, vicuna_S3_chatbot_POS], [prompt_POS, vicuna_S3_chatbot_POS])
85
 
86
+ prompt_CHUNK.submit(respond, [template_all.format(prompt_CHUNK), vicuna_S1_chatbot_CHUNK], [template_all.format(prompt_CHUNK), vicuna_S1_chatbot_CHUNK])
87
+ prompt_CHUNK.submit(respond, [prompt2_chunk.format(prompt_CHUNK), vicuna_S2_chatbot_CHUNK], [prompt2_chunk.format(prompt_CHUNK), vicuna_S2_chatbot_CHUNK])
88
  prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S3_chatbot_CHUNK], [prompt_CHUNK, vicuna_S3_chatbot_CHUNK])
89
 
90
  demo.launch()