Commit 323ed1c · Parent: 99d8a50 · removed prompt

app.py CHANGED
@@ -94,7 +94,7 @@ with open('demonstration_3_42_parse.txt', 'r') as f:
 theme = gr.themes.Soft()
 
 
-gpt_pipeline = pipeline(task="
+gpt_pipeline = pipeline(task="text-generation", model="gpt2")
 #vicuna7b_pipeline = pipeline(task="text2text-generation", model="lmsys/vicuna-7b-v1.3")
 #vicuna13b_pipeline = pipeline(task="text2text-generation", model="lmsys/vicuna-13b-v1.3")
 #vicuna33b_pipeline = pipeline(task="text2text-generation", model="lmsys/vicuna-33b-v1.3")
@@ -123,27 +123,27 @@ def process_text(model_name, task, text):
         strategy2 = prompt2_pos.format(text)
         strategy3 = demon_pos
 
-        response1 = gpt_pipeline(strategy1)
-        response2 = gpt_pipeline(strategy2)
-        response3 = gpt_pipeline(strategy3)
+        response1 = gpt_pipeline(strategy1)[0]['generated_text']
+        response2 = gpt_pipeline(strategy2)[0]['generated_text']
+        response3 = gpt_pipeline(strategy3)[0]['generated_text']
         return (response1, response2, response3)
     elif task == 'Chunking':
         strategy1 = template_all.format(text)
         strategy2 = prompt2_chunk.format(text)
         strategy3 = demon_chunk
 
-        response1 = gpt_pipeline(strategy1)
-        response2 = gpt_pipeline(strategy2)
-        response3 = gpt_pipeline(strategy3)
+        response1 = gpt_pipeline(strategy1)[0]['generated_text']
+        response2 = gpt_pipeline(strategy2)[0]['generated_text']
+        response3 = gpt_pipeline(strategy3)[0]['generated_text']
         return (response1, response2, response3)
     elif task == 'Parsing':
         strategy1 = template_all.format(text)
         strategy2 = prompt2_parse.format(text)
         strategy3 = demon_parse
 
-        response1 = gpt_pipeline(strategy1)
-        response2 = gpt_pipeline(strategy2)
-        response3 = gpt_pipeline(strategy3)
+        response1 = gpt_pipeline(strategy1)[0]['generated_text']
+        response2 = gpt_pipeline(strategy2)[0]['generated_text']
+        response3 = gpt_pipeline(strategy3)[0]['generated_text']
         return (response1, response2, response3)
 
 # Define prompts for each strategy based on the task
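
The new lines rely on the standard Hugging Face transformers behavior: a "text-generation" pipeline returns a list with one dict per generated sequence, and the generated string sits under the 'generated_text' key, which is why the commit indexes [0]['generated_text']. A minimal sketch of that pattern, assuming the pipeline added at line 97; the prompt string and max_new_tokens value are illustrative, not taken from app.py:

    from transformers import pipeline

    # Same pipeline the commit adds at line 97: GPT-2 causal text generation.
    gpt_pipeline = pipeline(task="text-generation", model="gpt2")

    # The call returns a list such as [{'generated_text': '...'}], so the
    # first sequence's text is read with [0]['generated_text'].
    prompt = "Label the POS tags of: The quick brown fox jumps."  # illustrative only
    result = gpt_pipeline(prompt, max_new_tokens=50)
    response = result[0]['generated_text']
    print(response)

Note that with default settings 'generated_text' contains the original prompt followed by the continuation, so each Gradio output will echo its strategy's prompt unless that is trimmed (for example by passing return_full_text=False to the pipeline call).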