Spaces:
Runtime error
Runtime error
Commit
·
a04a444
1
Parent(s):
26f68af
test
Browse files
app.py
CHANGED
@@ -14,8 +14,8 @@ template_all = "Output the <Noun, Verb, Adjective, Adverb, Preposition/Subord, C
|
|
14 |
# template_single = "Output any <{}> in the following sentence one per line without additional text: "
|
15 |
|
16 |
## Task 2
|
17 |
-
|
18 |
-
|
19 |
|
20 |
## Task 3
|
21 |
|
@@ -78,13 +78,41 @@ with gr.Blocks() as demo:
|
|
78 |
chat_history.append((message, bot_message))
|
79 |
time.sleep(2)
|
80 |
return "", chat_history
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
81 |
|
82 |
-
prompt_POS.submit(
|
83 |
-
prompt_POS.submit(
|
84 |
prompt_POS.submit(respond, [prompt_POS, vicuna_S3_chatbot_POS], [prompt_POS, vicuna_S3_chatbot_POS])
|
85 |
|
86 |
-
prompt_CHUNK.submit(
|
87 |
-
prompt_CHUNK.submit(
|
88 |
prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S3_chatbot_CHUNK], [prompt_CHUNK, vicuna_S3_chatbot_CHUNK])
|
89 |
|
90 |
demo.launch()
|
|
|
14 |
# template_single = "Output any <{}> in the following sentence one per line without additional text: "
|
15 |
|
16 |
## Task 2
|
17 |
+
task2_pos = "POS tag the following sentence using Universal POS tag set without generating additional text: "
|
18 |
+
task2_chunk = "Do sentence chunking for the following sentence as in CoNLL 2000 shared task without generating additional text: "
|
19 |
|
20 |
## Task 3
|
21 |
|
|
|
78 |
chat_history.append((message, bot_message))
|
79 |
time.sleep(2)
|
80 |
return "", chat_history
|
81 |
+
|
82 |
+
def strategy1(message, chat_history):
    """Run the all-tags prompt (Strategy 1) on *message* and append the exchange.

    The user message is prefixed with the module-level ``template_all`` prompt,
    fed to the global ``tokenizer``/``model`` pair, and the decoded reply is
    appended to ``chat_history`` as ``(prompt, reply)``.

    Returns ("", chat_history) so the Gradio textbox is cleared on submit.
    """
    prompt = template_all + message
    encoded = tokenizer.encode(prompt, return_tensors="pt")
    # NOTE(review): max_length counts prompt + generated tokens, so a long
    # template leaves little room for output — confirm this is intended.
    generated = model.generate(
        encoded, max_length=50, num_beams=5, no_repeat_ngram_size=2
    )
    reply = tokenizer.decode(generated[0], skip_special_tokens=True)
    chat_history.append((prompt, reply))
    time.sleep(2)
    return "", chat_history
|
90 |
+
|
91 |
+
def strategy2_POS(message, chat_history):
    """Run the POS-tagging prompt (Strategy 2) on *message* and log the exchange.

    Prefixes the message with the module-level ``task2_pos`` instruction,
    generates a reply with the global ``tokenizer``/``model``, and appends the
    ``(prompt, reply)`` pair to ``chat_history``.

    Returns ("", chat_history) so the Gradio textbox is cleared on submit.
    """
    prompt = task2_pos + message
    encoded = tokenizer.encode(prompt, return_tensors="pt")
    # NOTE(review): max_length=50 includes the prompt tokens — verify the
    # budget is large enough for a full tag sequence.
    generated = model.generate(
        encoded, max_length=50, num_beams=5, no_repeat_ngram_size=2
    )
    reply = tokenizer.decode(generated[0], skip_special_tokens=True)
    chat_history.append((prompt, reply))
    time.sleep(2)
    return "", chat_history
|
99 |
+
|
100 |
+
def strategy2_CHUNK(message, chat_history):
    """Run the sentence-chunking prompt (Strategy 2) on *message* and log the exchange.

    Prefixes the message with the module-level ``task2_chunk`` instruction,
    generates a reply with the global ``tokenizer``/``model``, and appends the
    ``(prompt, reply)`` pair to ``chat_history``.

    Returns ("", chat_history) so the Gradio textbox is cleared on submit.
    """
    prompt = task2_chunk + message
    encoded = tokenizer.encode(prompt, return_tensors="pt")
    # NOTE(review): max_length=50 includes the prompt tokens — verify the
    # budget is large enough for a full chunk sequence.
    generated = model.generate(
        encoded, max_length=50, num_beams=5, no_repeat_ngram_size=2
    )
    reply = tokenizer.decode(generated[0], skip_special_tokens=True)
    chat_history.append((prompt, reply))
    time.sleep(2)
    return "", chat_history
|
108 |
+
|
109 |
|
110 |
+
prompt_POS.submit(strategy1, [prompt_POS, vicuna_S1_chatbot_POS], [prompt_POS, vicuna_S1_chatbot_POS])
|
111 |
+
prompt_POS.submit(strategy2_POS, [prompt_POS, vicuna_S2_chatbot_POS], [prompt_POS, vicuna_S2_chatbot_POS])
|
112 |
prompt_POS.submit(respond, [prompt_POS, vicuna_S3_chatbot_POS], [prompt_POS, vicuna_S3_chatbot_POS])
|
113 |
|
114 |
+
prompt_CHUNK.submit(strategy1, [prompt_CHUNK, vicuna_S1_chatbot_CHUNK], [prompt_CHUNK, vicuna_S1_chatbot_CHUNK])
|
115 |
+
prompt_CHUNK.submit(strategy2_CHUNK, [prompt_CHUNK, vicuna_S2_chatbot_CHUNK], [prompt_CHUNK, vicuna_S2_chatbot_CHUNK])
|
116 |
prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S3_chatbot_CHUNK], [prompt_CHUNK, vicuna_S3_chatbot_CHUNK])
|
117 |
|
118 |
demo.launch()
|