Commit a9a458f · 1 Parent(s): 275f7cc
Edited prompts
app.py CHANGED
@@ -74,13 +74,13 @@ def gpt_respond(tab_name, message, chat_history, max_convo_length = 10):
     formatted_prompt = format_chat_prompt(message, chat_history, max_convo_length)
     print('Prompt + Context:')
     print(formatted_prompt)
-    bot_message = chat(system_prompt = f'''Generate the output only for the assistant.
+    bot_message = chat(system_prompt = f'''Generate the output only for the assistant. Output any <{tab_name}> in the following sentence one per line without any additional text.''',
                        user_prompt = formatted_prompt)
     chat_history.append((message, bot_message))
     return "", chat_history
 
 def vicuna_respond(tab_name, message, chat_history):
-    formatted_prompt = f'''Generate the output only for the assistant.
+    formatted_prompt = f'''Generate the output only for the assistant. Output any {tab_name} in the following sentence one per line without any additional text: {message}'''
     print('Vicuna Ling Ents Fn - Prompt + Context:')
     print(formatted_prompt)
     input_ids = vicuna_tokenizer.encode(formatted_prompt, return_tensors="pt")
@@ -97,7 +97,7 @@ def vicuna_respond(tab_name, message, chat_history):
     return tab_name, "", chat_history
 
 def llama_respond(tab_name, message, chat_history):
-    formatted_prompt = f'''Generate the output only for the assistant.
+    formatted_prompt = f'''Generate the output only for the assistant. Output any {tab_name} in the following sentence one per line without any additional text: {message}'''
     # print('Llama - Prompt + Context:')
     # print(formatted_prompt)
     input_ids = llama_tokenizer.encode(formatted_prompt, return_tensors="pt")
@@ -116,18 +116,18 @@ def gpt_strategies_respond(strategy, task_name, task_ling_ent, message, chat_his
     formatted_system_prompt = ""
     if (task_name == "POS Tagging"):
         if (strategy == "S1"):
-            formatted_system_prompt = f'''Generate the output only for the assistant.
+            formatted_system_prompt = f'''Generate the output only for the assistant. Output any {task_ling_ent} in the following sentence one per line without any additional text: {message}'''
         elif (strategy == "S2"):
-            formatted_system_prompt = f'''
+            formatted_system_prompt = f'''POS tag the following sentence using Universal POS tag set without generating any additional text: {message}'''
         elif (strategy == "S3"):
-            formatted_system_prompt = f'''
+            formatted_system_prompt = f'''POS tag the following sentence using Universal POS tag set without generating any additional text: {message}'''
     elif (task_name == "Chunking"):
         if (strategy == "S1"):
-            formatted_system_prompt = f'''Generate the output only for the assistant.
+            formatted_system_prompt = f'''Generate the output only for the assistant. Output any {task_ling_ent} in the following sentence one per line without any additional text: {message}'''
         elif (strategy == "S2"):
-            formatted_system_prompt = f'''
+            formatted_system_prompt = f'''Chunk the following sentence in CoNLL 2000 format with BIO tags without outputing any additional text: {message}'''
         elif (strategy == "S3"):
-            formatted_system_prompt = f'''
+            formatted_system_prompt = f'''Chunk the following sentence in CoNLL 2000 format with BIO tags without outputing any additional text: {message}'''
 
     formatted_prompt = format_chat_prompt(message, chat_history, max_convo_length)
     print('Prompt + Context:')
@@ -141,18 +141,18 @@ def vicuna_strategies_respond(strategy, task_name, task_ling_ent, message, chat_
     formatted_prompt = ""
     if (task_name == "POS Tagging"):
         if (strategy == "S1"):
-            formatted_prompt = f'''Generate the output only for the assistant.
+            formatted_prompt = f'''Generate the output only for the assistant. Output any {task_ling_ent} in the following sentence one per line without any additional text: {message}'''
         elif (strategy == "S2"):
-            formatted_prompt = f'''
+            formatted_prompt = f'''POS tag the following sentence using Universal POS tag set without generating any additional text: {message}'''
         elif (strategy == "S3"):
-            formatted_prompt = f'''
+            formatted_prompt = f'''POS tag the following sentence using Universal POS tag set without generating any additional text: {message}'''
     elif (task_name == "Chunking"):
         if (strategy == "S1"):
-            formatted_prompt = f'''Generate the output only for the assistant.
+            formatted_prompt = f'''Generate the output only for the assistant. Output any {task_ling_ent} in the following sentence one per line without any additional text: {message}'''
         elif (strategy == "S2"):
-            formatted_prompt = f'''
+            formatted_prompt = f'''Chunk the following sentence in CoNLL 2000 format with BIO tags without outputing any additional text: {message}'''
         elif (strategy == "S3"):
-            formatted_prompt = f'''
+            formatted_prompt = f'''Chunk the following sentence in CoNLL 2000 format with BIO tags without outputing any additional text: {message}'''
 
     print('Vicuna Strategy Fn - Prompt + Context:')
     print(formatted_prompt)
@@ -173,18 +173,18 @@ def llama_strategies_respond(strategy, task_name, task_ling_ent, message, chat_h
     formatted_prompt = ""
     if (task_name == "POS Tagging"):
         if (strategy == "S1"):
-            formatted_prompt = f'''Generate the output only for the assistant.
+            formatted_prompt = f'''Generate the output only for the assistant. Output any {task_ling_ent} in the following sentence one per line without any additional text: {message}'''
         elif (strategy == "S2"):
-            formatted_prompt = f'''
+            formatted_prompt = f'''POS tag the following sentence using Universal POS tag set without generating any additional text: {message}'''
         elif (strategy == "S3"):
-            formatted_prompt = f'''
+            formatted_prompt = f'''POS tag the following sentence using Universal POS tag set without generating any additional text: {message}'''
     elif (task_name == "Chunking"):
         if (strategy == "S1"):
-            formatted_prompt = f'''Generate the output only for the assistant.
+            formatted_prompt = f'''Generate the output only for the assistant. Output any {task_ling_ent} in the following sentence one per line without any additional text: {message}'''
         elif (strategy == "S2"):
-            formatted_prompt = f'''
+            formatted_prompt = f'''Chunk the following sentence in CoNLL 2000 format with BIO tags without outputing any additional text: {message}'''
         elif (strategy == "S3"):
-            formatted_prompt = f'''
+            formatted_prompt = f'''Chunk the following sentence in CoNLL 2000 format with BIO tags without outputing any additional text: {message}'''
 
     # print('Llama Strategies - Prompt + Context:')
     # print(formatted_prompt)