Commit fa3ff72 · Parent: 816d981 · "test" (Space status: Runtime error)
app.py CHANGED

@@ -50,15 +50,9 @@ for i, j in zip(ents, ents_prompt):
     print(i, j)
 
 model_mapping = {
-
+    'gpt3.5': 'gpt-3.5-turbo-0613',
     'vicuna-7b': 'lmsys/vicuna-7b-v1.3',
-
-    #'vicuna-33b': 'lmsys/vicuna-33b-v1.3',
-    #'fastchat-t5': 'lmsys/fastchat-t5-3b-v1.0',
-    #'llama-7b': './llama/hf/7B',
-    #'llama-13b': './llama/hf/13B',
-    #'llama-30b': './llama/hf/30B',
-    #'alpaca': './alpaca-7B',
+    'llama-7b': './llama/hf/7B',
 }
 
 with open('sample_uniform_1k_2.txt', 'r') as f:
@@ -93,20 +87,14 @@ with open('demonstration_3_42_parse.txt', 'r') as f:
 # Your existing code
 theme = gr.themes.Soft()
 
-
-
+# issue get request for gpt 3.5
+gpt_pipeline = pipeline(task="text2text-generation", model="gpt3.5")
 vicuna7b_pipeline = pipeline(task="text2text-generation", model="lmsys/vicuna-7b-v1.3")
-
-#vicuna33b_pipeline = pipeline(task="text2text-generation", model="lmsys/vicuna-33b-v1.3")
-#fastchatT5_pipeline = pipeline(task="text2text-generation", model="lmsys/fastchat-t5-3b-v1.0")
-#llama7b_pipeline = pipeline(task="text2text-generation", model="./llama/hf/7B")
-#llama13b_pipeline = pipeline(task="text2text-generation", model="./llama/hf/13B")
-#llama30b_pipeline = pipeline(task="text2text-generation", model="./llama/hf/30B")
-#alpaca_pipeline = pipeline(task="text2text-generation", model="./alpaca-7B")
+llama7b_pipeline = pipeline(task="text2text-generation", model="./llama/hf/7B")
 
 # Dropdown options for model and task
 model_options = list(model_mapping.keys())
-task_options = ['POS', 'Chunking', 'Parsing']
+task_options = ['POS', 'Chunking'] # remove parsing
 
 
 # Function to process text based on model and task
@@ -122,27 +110,18 @@ def process_text(model_name, task, text):
         strategy2_format = prompt2_pos.format(text)
         strategy3_format = demon_pos
 
-        result1 =
-        result2 =
-        result3 =
+        result1 = gpt_pipeline(strategy1_format)[0]['generated_text']
+        result2 = gpt_pipeline(strategy2_format)[0]['generated_text']
+        result3 = gpt_pipeline(strategy3_format)[0]['generated_text']
         return (result1, result2, result3)
     elif task == 'Chunking':
         strategy1_format = template_all.format(text)
         strategy2_format = prompt2_chunk.format(text)
         strategy3_format = demon_chunk
 
-        result1 =
-        result2 =
-        result3 =
-        return (result1, result2, result3)
-    elif task == 'Parsing':
-        strategy1_format = template_all.format(text)
-        strategy2_format = prompt2_parse.format(text)
-        strategy3_format = demon_parse
-
-        result1 = vicuna7b_pipeline(strategy1_format)[0]['generated_text']
-        result2 = vicuna7b_pipeline(strategy2_format)[0]['generated_text']
-        result3 = vicuna7b_pipeline(strategy3_format)[0]['generated_text']
+        result1 = gpt_pipeline(strategy1_format)[0]['generated_text']
+        result2 = gpt_pipeline(strategy2_format)[0]['generated_text']
+        result3 = gpt_pipeline(strategy3_format)[0]['generated_text']
         return (result1, result2, result3)
 
 # Gradio interface
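As committed, the gpt3.5 path cannot run: `pipeline(task="text2text-generation", model="gpt3.5")` asks transformers to resolve "gpt3.5" as a Hub checkpoint, which does not exist, and `gpt-3.5-turbo-0613` is only reachable over OpenAI's HTTP API. That is presumably what the new `# issue get request for gpt 3.5` comment is pointing at. A minimal sketch of such a request, assuming an `OPENAI_API_KEY` environment variable and a hypothetical `gpt35_generate` helper (neither is in the commit):

import os
import requests

def gpt35_generate(prompt: str) -> str:
    # 'gpt3.5' is a model_mapping key, not a Hub checkpoint, so the call
    # has to go to OpenAI's chat completions endpoint instead of
    # transformers.pipeline. Assumes OPENAI_API_KEY is set.
    resp = requests.post(
        "https://api.openai.com/v1/chat/completions",
        headers={"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"},
        json={
            "model": "gpt-3.5-turbo-0613",
            "messages": [{"role": "user", "content": prompt}],
        },
        timeout=60,
    )
    resp.raise_for_status()
    return resp.json()["choices"][0]["message"]["content"]

The three `result* = gpt_pipeline(strategy*_format)[0]['generated_text']` calls in `process_text` could then be swapped for `gpt35_generate(strategy*_format)`.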
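The remaining `pipeline` calls are also likely behind the Space's "Runtime error" status: `lmsys/vicuna-7b-v1.3` and the local `./llama/hf/7B` checkpoint are decoder-only LLaMA-family models, and loading them under the encoder-decoder `text2text-generation` task makes transformers raise an unrecognized-configuration error for `LlamaConfig`. A sketch of the causal-LM variant, assuming the checkpoint fits on the Space's hardware:

from transformers import pipeline

# Vicuna is a decoder-only (causal) model, so it loads under
# AutoModelForCausalLM; "text2text-generation" expects an encoder-decoder
# model such as T5 and fails on a LlamaConfig.
vicuna7b_pipeline = pipeline(
    task="text-generation",  # causal-LM task, not text2text-generation
    model="lmsys/vicuna-7b-v1.3",
)

out = vicuna7b_pipeline(
    "Tag each token in this sentence with its POS tag:",
    max_new_tokens=128,
    return_full_text=False,  # drop the echoed prompt, keep only the completion
)
print(out[0]["generated_text"])

The same `task="text-generation"` change would apply to `llama7b_pipeline` over `./llama/hf/7B`.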