research14 committed
Commit bfc086f · 1 Parent(s): 323ed1c

added another model to test

Files changed (1)
  1. app.py +30 -2
app.py CHANGED
@@ -54,7 +54,7 @@ model_mapping = {
     #'vicuna-7b': 'lmsys/vicuna-7b-v1.3',
     #'vicuna-13b': 'lmsys/vicuna-13b-v1.3',
     #'vicuna-33b': 'lmsys/vicuna-33b-v1.3',
-    #'fastchat-t5': 'lmsys/fastchat-t5-3b-v1.0',
+    'fastchat-t5': 'lmsys/fastchat-t5-3b-v1.0',
     #'llama-7b': './llama/hf/7B',
     #'llama-13b': './llama/hf/13B',
     #'llama-30b': './llama/hf/30B',
@@ -98,7 +98,7 @@ gpt_pipeline = pipeline(task="text-generation", model="gpt2")
 #vicuna7b_pipeline = pipeline(task="text2text-generation", model="lmsys/vicuna-7b-v1.3")
 #vicuna13b_pipeline = pipeline(task="text2text-generation", model="lmsys/vicuna-13b-v1.3")
 #vicuna33b_pipeline = pipeline(task="text2text-generation", model="lmsys/vicuna-33b-v1.3")
-#fastchatT5_pipeline = pipeline(task="text2text-generation", model="lmsys/fastchat-t5-3b-v1.0")
+fastchatT5_pipeline = pipeline(task="text2text-generation", model="lmsys/fastchat-t5-3b-v1.0")
 #llama7b_pipeline = pipeline(task="text2text-generation", model="./llama/hf/7B")
 #llama13b_pipeline = pipeline(task="text2text-generation", model="./llama/hf/13B")
 #llama30b_pipeline = pipeline(task="text2text-generation", model="./llama/hf/30B")
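Review note on this hunk: the new fastchatT5_pipeline is built eagerly at import time, next to gpt_pipeline, so the 3B-parameter fastchat-t5 checkpoint is downloaded and held in memory even when a user never selects that model. Below is a minimal sketch of lazy, cached pipeline construction; get_pipeline and _PIPELINE_SPECS are hypothetical names introduced here, not part of this commit, and the (task, checkpoint) pairs are copied from the pipeline calls in app.py.

from functools import lru_cache

from transformers import pipeline

# Hypothetical registry; the (task, checkpoint) pairs mirror app.py.
_PIPELINE_SPECS = {
    'gpt2': ('text-generation', 'gpt2'),
    'fastchat-t5': ('text2text-generation', 'lmsys/fastchat-t5-3b-v1.0'),
}

@lru_cache(maxsize=None)
def get_pipeline(model_name):
    # Build each pipeline once, on first request, then reuse the cached one.
    task, checkpoint = _PIPELINE_SPECS[model_name]
    return pipeline(task=task, model=checkpoint)

With this pattern, startup stays cheap and each model pays its load cost only on first use.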
@@ -145,6 +145,34 @@ def process_text(model_name, task, text):
             response2 = gpt_pipeline(strategy2)[0]['generated_text']
             response3 = gpt_pipeline(strategy3)[0]['generated_text']
             return (response1, response2, response3)
+    elif model_name == 'fastchat-t5':
+        if task == 'POS':
+            strategy1 = template_all.format(text)
+            strategy2 = prompt2_pos.format(text)
+            strategy3 = demon_pos
+
+            response1 = fastchatT5_pipeline(strategy1)[0]['generated_text']
+            response2 = fastchatT5_pipeline(strategy2)[0]['generated_text']
+            response3 = fastchatT5_pipeline(strategy3)[0]['generated_text']
+            return (response1, response2, response3)
+        elif task == 'Chunking':
+            strategy1 = template_all.format(text)
+            strategy2 = prompt2_chunk.format(text)
+            strategy3 = demon_chunk
+
+            response1 = fastchatT5_pipeline(strategy1)[0]['generated_text']
+            response2 = fastchatT5_pipeline(strategy2)[0]['generated_text']
+            response3 = fastchatT5_pipeline(strategy3)[0]['generated_text']
+            return (response1, response2, response3)
+        elif task == 'Parsing':
+            strategy1 = template_all.format(text)
+            strategy2 = prompt2_parse.format(text)
+            strategy3 = demon_parse
+
+            response1 = fastchatT5_pipeline(strategy1)[0]['generated_text']
+            response2 = fastchatT5_pipeline(strategy2)[0]['generated_text']
+            response3 = fastchatT5_pipeline(strategy3)[0]['generated_text']
+            return (response1, response2, response3)
 
     # Define prompts for each strategy based on the task
     #strategy_prompts = {
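Review note on the process_text hunk: the three new task branches are identical except for which prompt globals they read, and the same triplet already exists in the gpt2 branch. Below is a sketch of a table-driven alternative; TASK_PROMPTS and run_strategies are hypothetical names not in this commit, and the placeholder strings only stand in for the real template_all, prompt2_* and demon_* globals defined earlier in app.py.

# Placeholders standing in for the real prompt globals in app.py.
template_all = "Label POS tags, chunks and a parse for: {}"
prompt2_pos, demon_pos = "POS-tag this text: {}", "(POS demonstration prompt)"
prompt2_chunk, demon_chunk = "Chunk this text: {}", "(chunking demonstration prompt)"
prompt2_parse, demon_parse = "Parse this text: {}", "(parsing demonstration prompt)"

# One row per task: (strategy-2 template, strategy-3 demonstration).
TASK_PROMPTS = {
    'POS': (prompt2_pos, demon_pos),
    'Chunking': (prompt2_chunk, demon_chunk),
    'Parsing': (prompt2_parse, demon_parse),
}

def run_strategies(pipe, task, text):
    # Strategy 1 uses the shared template, strategy 2 the task-specific
    # prompt, strategy 3 the fixed demonstration; all three responses come
    # from the same pipeline.
    prompt2, demon = TASK_PROMPTS[task]
    strategies = (template_all.format(text), prompt2.format(text), demon)
    return tuple(pipe(s)[0]['generated_text'] for s in strategies)

With something like this in place, the entire new elif block would shrink to return run_strategies(fastchatT5_pipeline, task, text), and the next model added for testing would not need another 28-line branch.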