research14 commited on
Commit
26f68af
·
1 Parent(s): 49c7ae8
Files changed (1) hide show
  1. app.py +8 -8
app.py CHANGED
@@ -9,13 +9,13 @@ model = AutoModelForCausalLM.from_pretrained(model_name)
9
 
10
  ## Task 1
11
  # msg = template_all.format(text)
12
- template_all = '''Output the <Noun, Verb, Adjective, Adverb, Preposition/Subord, Coordinating Conjunction, Cardinal Number, Determiner, Noun Phrase, Verb Phrase, Adjective Phrase, Adverb Phrase, Preposition Phrase, Conjunction Phrase, Coordinate Phrase, Quantitative Phrase, Complex Nominal, Clause, Dependent Clause, Fragment Clause, T-unit, Complex T-unit, Fragment T-unit> in the following sentence without additional text in json format: "{}"'''
13
  # msg = template_single.format(ents_prompt[eid], text)
14
- template_single = '''Output any <{}> in the following sentence one per line without additional text: "{}"'''
15
 
16
  ## Task 2
17
- prompt2_pos = '''POS tag the following sentence using Universal POS tag set without generating additional text: {}'''
18
- prompt2_chunk = '''Do sentence chunking for the following sentence as in CoNLL 2000 shared task without generating additional text: {}'''
19
 
20
  ## Task 3
21
 
@@ -79,12 +79,12 @@ with gr.Blocks() as demo:
79
  time.sleep(2)
80
  return "", chat_history
81
 
82
- prompt_POS.submit(respond, [template_all.format(prompt_POS), vicuna_S1_chatbot_POS], [template_all.format(prompt_POS), vicuna_S1_chatbot_POS])
83
- prompt_POS.submit(respond, [prompt2_pos.format(prompt_POS), vicuna_S2_chatbot_POS], [prompt2_pos.format(prompt_POS), vicuna_S2_chatbot_POS])
84
  prompt_POS.submit(respond, [prompt_POS, vicuna_S3_chatbot_POS], [prompt_POS, vicuna_S3_chatbot_POS])
85
 
86
- prompt_CHUNK.submit(respond, [template_all.format(prompt_CHUNK), vicuna_S1_chatbot_CHUNK], [template_all.format(prompt_CHUNK), vicuna_S1_chatbot_CHUNK])
87
- prompt_CHUNK.submit(respond, [prompt2_chunk.format(prompt_CHUNK), vicuna_S2_chatbot_CHUNK], [prompt2_chunk.format(prompt_CHUNK), vicuna_S2_chatbot_CHUNK])
88
  prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S3_chatbot_CHUNK], [prompt_CHUNK, vicuna_S3_chatbot_CHUNK])
89
 
90
  demo.launch()
 
9
 
10
  ## Task 1
11
  # msg = template_all.format(text)
12
+ template_all = "Output the <Noun, Verb, Adjective, Adverb, Preposition/Subord, Coordinating Conjunction, Cardinal Number, Determiner, Noun Phrase, Verb Phrase, Adjective Phrase, Adverb Phrase, Preposition Phrase, Conjunction Phrase, Coordinate Phrase, Quantitative Phrase, Complex Nominal, Clause, Dependent Clause, Fragment Clause, T-unit, Complex T-unit, Fragment T-unit> in the following sentence without additional text in json format: "
13
  # msg = template_single.format(ents_prompt[eid], text)
14
+ # template_single = "Output any <{}> in the following sentence one per line without additional text: "
15
 
16
  ## Task 2
17
+ prompt2_pos = "POS tag the following sentence using Universal POS tag set without generating additional text: "
18
+ prompt2_chunk = "Do sentence chunking for the following sentence as in CoNLL 2000 shared task without generating additional text: "
19
 
20
  ## Task 3
21
 
 
79
  time.sleep(2)
80
  return "", chat_history
81
 
82
+ prompt_POS.submit(respond, [template_all + prompt_POS, vicuna_S1_chatbot_POS], [template_all + prompt_POS, vicuna_S1_chatbot_POS])
83
+ prompt_POS.submit(respond, [prompt2_pos + prompt_POS, vicuna_S2_chatbot_POS], [prompt2_pos + prompt_POS, vicuna_S2_chatbot_POS])
84
  prompt_POS.submit(respond, [prompt_POS, vicuna_S3_chatbot_POS], [prompt_POS, vicuna_S3_chatbot_POS])
85
 
86
+ prompt_CHUNK.submit(respond, [template_all + prompt_CHUNK, vicuna_S1_chatbot_CHUNK], [template_all + prompt_CHUNK, vicuna_S1_chatbot_CHUNK])
87
+ prompt_CHUNK.submit(respond, [prompt2_chunk + prompt_CHUNK, vicuna_S2_chatbot_CHUNK], [prompt2_chunk + prompt_CHUNK, vicuna_S2_chatbot_CHUNK])
88
  prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S3_chatbot_CHUNK], [prompt_CHUNK, vicuna_S3_chatbot_CHUNK])
89
 
90
  demo.launch()