research14 committed
Commit adf3921 · 1 Parent(s): d2a6101

removed text and added quotations to prompt

Files changed (1)
  1. app.py +21 -21
app.py CHANGED
@@ -51,13 +51,13 @@ def gpt_respond(tab_name, message, chat_history, max_convo_length = 10):
     formatted_prompt = format_chat_prompt(message, chat_history, max_convo_length)
     print('Prompt + Context:')
     print(formatted_prompt)
-    bot_message = chat(system_prompt = f'''Generate the output only for the assistant. Output any <{tab_name}> in the following sentence one per line without any additional text.''',
+    bot_message = chat(system_prompt = f'''Generate the output only for the assistant. Output any <{tab_name}> in the following sentence one per line.''',
                        user_prompt = formatted_prompt)
     chat_history.append((message, bot_message))
     return "", chat_history
 
 def vicuna_respond(tab_name, message, chat_history):
-    formatted_prompt = f'''Generate the output only for the assistant. Output any {tab_name} in the following sentence one per line without any additional text: {message}'''
+    formatted_prompt = f'''Output any {tab_name} in the following sentence one per line: "{message}"'''
     print('Vicuna Ling Ents Fn - Prompt + Context:')
     print(formatted_prompt)
     input_ids = vicuna_tokenizer.encode(formatted_prompt, return_tensors="pt")
@@ -74,7 +74,7 @@ def vicuna_respond(tab_name, message, chat_history):
     return tab_name, "", chat_history
 
 def llama_respond(tab_name, message, chat_history):
-    formatted_prompt = f'''Generate the output only for the assistant. Output any {tab_name} in the following sentence one per line without any additional text: {message}'''
+    formatted_prompt = f'''Output any {tab_name} in the following sentence one per line: "{message}"'''
     # print('Llama - Prompt + Context:')
     # print(formatted_prompt)
     input_ids = llama_tokenizer.encode(formatted_prompt, return_tensors="pt")
@@ -96,18 +96,18 @@ def gpt_strategies_respond(have_key, strategy, task_name, task_ling_ent, message
     formatted_system_prompt = ""
     if (task_name == "POS Tagging"):
         if (strategy == "S1"):
-            formatted_system_prompt = f'''Generate the output only for the assistant. Output any {task_ling_ent} in the following sentence one per line without any additional text: {message}'''
+            formatted_system_prompt = f'''Generate the output only for the assistant. Output any {task_ling_ent} in the following sentence one per line: "{message}"'''
         elif (strategy == "S2"):
-            formatted_system_prompt = f'''POS tag the following sentence using Universal POS tag set without generating any additional text: {message}'''
+            formatted_system_prompt = f'''POS tag the following sentence using Universal POS tag set: "{message}"'''
         elif (strategy == "S3"):
-            formatted_system_prompt = f'''POS tag the following sentence using Universal POS tag set without generating any additional text: {message}'''
+            formatted_system_prompt = f'''POS tag the following sentence using Universal POS tag set: "{message}"'''
     elif (task_name == "Chunking"):
         if (strategy == "S1"):
-            formatted_system_prompt = f'''Generate the output only for the assistant. Output any {task_ling_ent} in the following sentence one per line without any additional text: {message}'''
+            formatted_system_prompt = f'''Generate the output only for the assistant. Output any {task_ling_ent} in the following sentence one per line: "{message}"'''
         elif (strategy == "S2"):
-            formatted_system_prompt = f'''Chunk the following sentence in CoNLL 2000 format with BIO tags without outputing any additional text: {message}'''
+            formatted_system_prompt = f'''Chunk the following sentence in CoNLL 2000 format with BIO tags: "{message}"'''
         elif (strategy == "S3"):
-            formatted_system_prompt = f'''Chunk the following sentence in CoNLL 2000 format with BIO tags without outputing any additional text: {message}'''
+            formatted_system_prompt = f'''Chunk the following sentence in CoNLL 2000 format with BIO tags: "{message}"'''
 
     formatted_prompt = format_chat_prompt(message, chat_history, max_convo_length)
     print('Prompt + Context:')
@@ -121,18 +121,18 @@ def vicuna_strategies_respond(strategy, task_name, task_ling_ent, message, chat_
     formatted_prompt = ""
     if (task_name == "POS Tagging"):
         if (strategy == "S1"):
-            formatted_prompt = f'''Generate the output only for the assistant. Output any {task_ling_ent} in the following sentence one per line without any additional text: {message}'''
+            formatted_prompt = f'''Output any {task_ling_ent} in the following sentence one per line: "{message}"'''
         elif (strategy == "S2"):
-            formatted_prompt = f'''POS tag the following sentence using Universal POS tag set without generating any additional text: {message}'''
+            formatted_prompt = f'''POS tag the following sentence using Universal POS tag set: "{message}"'''
         elif (strategy == "S3"):
-            formatted_prompt = f'''POS tag the following sentence using Universal POS tag set without generating any additional text: {message}'''
+            formatted_prompt = f'''POS tag the following sentence using Universal POS tag set: "{message}"'''
     elif (task_name == "Chunking"):
         if (strategy == "S1"):
-            formatted_prompt = f'''Generate the output only for the assistant. Output any {task_ling_ent} in the following sentence one per line without any additional text: {message}'''
+            formatted_prompt = f'''Output any {task_ling_ent} in the following sentence one per line: "{message}"'''
         elif (strategy == "S2"):
-            formatted_prompt = f'''Chunk the following sentence in CoNLL 2000 format with BIO tags without outputing any additional text: {message}'''
+            formatted_prompt = f'''Chunk the following sentence in CoNLL 2000 format with BIO tags: "{message}"'''
         elif (strategy == "S3"):
-            formatted_prompt = f'''Chunk the following sentence in CoNLL 2000 format with BIO tags without outputing any additional text: {message}'''
+            formatted_prompt = f'''Chunk the following sentence in CoNLL 2000 format with BIO tags: "{message}"'''
 
     print('Vicuna Strategy Fn - Prompt + Context:')
     print(formatted_prompt)
@@ -153,18 +153,18 @@ def llama_strategies_respond(strategy, task_name, task_ling_ent, message, chat_h
     formatted_prompt = ""
     if (task_name == "POS Tagging"):
        if (strategy == "S1"):
-            formatted_prompt = f'''Generate the output only for the assistant. Output any {task_ling_ent} in the following sentence one per line without any additional text: {message}'''
+            formatted_prompt = f'''Output any {task_ling_ent} in the following sentence one per line: "{message}"'''
        elif (strategy == "S2"):
-            formatted_prompt = f'''POS tag the following sentence using Universal POS tag set without generating any additional text: {message}'''
+            formatted_prompt = f'''POS tag the following sentence using Universal POS tag set: "{message}"'''
        elif (strategy == "S3"):
-            formatted_prompt = f'''POS tag the following sentence using Universal POS tag set without generating any additional text: {message}'''
+            formatted_prompt = f'''POS tag the following sentence using Universal POS tag set: "{message}"'''
     elif (task_name == "Chunking"):
        if (strategy == "S1"):
-            formatted_prompt = f'''Generate the output only for the assistant. Output any {task_ling_ent} in the following sentence one per line without any additional text: {message}'''
+            formatted_prompt = f'''Output any {task_ling_ent} in the following sentence one per line: "{message}"'''
        elif (strategy == "S2"):
-            formatted_prompt = f'''Chunk the following sentence in CoNLL 2000 format with BIO tags without outputing any additional text: {message}'''
+            formatted_prompt = f'''Chunk the following sentence in CoNLL 2000 format with BIO tags: "{message}"'''
        elif (strategy == "S3"):
-            formatted_prompt = f'''Chunk the following sentence in CoNLL 2000 format with BIO tags without outputing any additional text: {message}'''
+            formatted_prompt = f'''Chunk the following sentence in CoNLL 2000 format with BIO tags: "{message}"'''
 
     # print('Llama Strategies - Prompt + Context:')
     # print(formatted_prompt)
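
To make the effect of the commit concrete, below is a minimal sketch of how two of the revised templates render once the f-strings are filled in. The entity name and example sentence are hypothetical placeholders, not values from app.py; the "additional text" clauses are the removed text and the quotes around {message} are the added quotations referred to in the commit message.

# Hypothetical inputs for illustration only (not taken from app.py).
tab_name = "Noun"
message = "The quick brown fox jumps over the lazy dog."

# Revised template used by vicuna_respond / llama_respond after this commit.
formatted_prompt = f'''Output any {tab_name} in the following sentence one per line: "{message}"'''
print(formatted_prompt)
# Output any Noun in the following sentence one per line: "The quick brown fox jumps over the lazy dog."

# Revised S2 template for POS Tagging in the *_strategies_respond functions.
formatted_prompt = f'''POS tag the following sentence using Universal POS tag set: "{message}"'''
print(formatted_prompt)
# POS tag the following sentence using Universal POS tag set: "The quick brown fox jumps over the lazy dog."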