research14 committed on
Commit
1b46f72
·
1 Parent(s): 5cb31ec
Files changed (1) hide show
  1. app.py +12 -12
app.py CHANGED
@@ -27,7 +27,7 @@ syntags = ['NP', 'S', 'VP', 'ADJP', 'ADVP', 'SBAR', 'TOP', 'PP', 'POS', 'NAC', "
27
  'WHADVP', 'NX', 'PRT', 'VBZ', 'VBP', 'MD', 'NN', 'WHPP', 'SQ', 'SBARQ', 'LST', 'INTJ', 'X', 'UCP', 'CONJP', 'NNP', 'CD', 'JJ',
28
  'VBD', 'WHADJP', 'PRP', 'RRC', 'NNS', 'SYM', 'CC']
29
 
30
- openai.api_key = "sk-***REDACTED-LEAKED-KEY***"
31
 
32
  # determinant vs. determiner
33
  # https://wikidiff.com/determiner/determinant
@@ -94,8 +94,8 @@ with open('demonstration_3_42_parse.txt', 'r') as f:
94
  theme = gr.themes.Soft()
95
 
96
 
97
- #gpt_pipeline = pipeline(task="text-generation", model="gpt2")
98
- vicuna7b_pipeline = pipeline(task="text2text-generation", model="lmsys/vicuna-7b-v1.3")
99
  #vicuna13b_pipeline = pipeline(task="text2text-generation", model="lmsys/vicuna-13b-v1.3")
100
  #vicuna33b_pipeline = pipeline(task="text2text-generation", model="lmsys/vicuna-33b-v1.3")
101
  #fastchatT5_pipeline = pipeline(task="text2text-generation", model="lmsys/fastchat-t5-3b-v1.0")
@@ -123,27 +123,27 @@ def process_text(model_name, task, text):
123
  strategy2 = prompt2_pos.format(text)
124
  strategy3 = demon_pos
125
 
126
- response1 = vicuna7b_pipeline(strategy1)[0]['generated_text']
127
- response2 = vicuna7b_pipeline(strategy2)[0]['generated_text']
128
- response3 = vicuna7b_pipeline(strategy3)[0]['generated_text']
129
  return (response1, response2, response3)
130
  elif task == 'Chunking':
131
  strategy1 = template_all.format(text)
132
  strategy2 = prompt2_chunk.format(text)
133
  strategy3 = demon_chunk
134
 
135
- response1 = vicuna7b_pipeline(strategy1)[0]['generated_text']
136
- response2 = vicuna7b_pipeline(strategy2)[0]['generated_text']
137
- response3 = vicuna7b_pipeline(strategy3)[0]['generated_text']
138
  return (response1, response2, response3)
139
  elif task == 'Parsing':
140
  strategy1 = template_all.format(text)
141
  strategy2 = prompt2_parse.format(text)
142
  strategy3 = demon_parse
143
 
144
- response1 = vicuna7b_pipeline(strategy1)[0]['generated_text']
145
- response2 = vicuna7b_pipeline(strategy2)[0]['generated_text']
146
- response3 = vicuna7b_pipeline(strategy3)[0]['generated_text']
147
  return (response1, response2, response3)
148
 
149
  # Gradio interface
 
27
  'WHADVP', 'NX', 'PRT', 'VBZ', 'VBP', 'MD', 'NN', 'WHPP', 'SQ', 'SBARQ', 'LST', 'INTJ', 'X', 'UCP', 'CONJP', 'NNP', 'CD', 'JJ',
28
  'VBD', 'WHADJP', 'PRP', 'RRC', 'NNS', 'SYM', 'CC']
29
 
30
+ openai.api_key = " "
31
 
32
  # determinant vs. determiner
33
  # https://wikidiff.com/determiner/determinant
 
94
  theme = gr.themes.Soft()
95
 
96
 
97
+ gpt_pipeline = pipeline(task="text-generation", model="gpt2")
98
+ #vicuna7b_pipeline = pipeline(task="text2text-generation", model="lmsys/vicuna-7b-v1.3")
99
  #vicuna13b_pipeline = pipeline(task="text2text-generation", model="lmsys/vicuna-13b-v1.3")
100
  #vicuna33b_pipeline = pipeline(task="text2text-generation", model="lmsys/vicuna-33b-v1.3")
101
  #fastchatT5_pipeline = pipeline(task="text2text-generation", model="lmsys/fastchat-t5-3b-v1.0")
 
123
  strategy2 = prompt2_pos.format(text)
124
  strategy3 = demon_pos
125
 
126
+ response1 = gpt_pipeline(strategy1)[0]['generated_text']
127
+ response2 = gpt_pipeline(strategy2)[0]['generated_text']
128
+ response3 = gpt_pipeline(strategy3)[0]['generated_text']
129
  return (response1, response2, response3)
130
  elif task == 'Chunking':
131
  strategy1 = template_all.format(text)
132
  strategy2 = prompt2_chunk.format(text)
133
  strategy3 = demon_chunk
134
 
135
+ response1 = gpt_pipeline(strategy1)[0]['generated_text']
136
+ response2 = gpt_pipeline(strategy2)[0]['generated_text']
137
+ response3 = gpt_pipeline(strategy3)[0]['generated_text']
138
  return (response1, response2, response3)
139
  elif task == 'Parsing':
140
  strategy1 = template_all.format(text)
141
  strategy2 = prompt2_parse.format(text)
142
  strategy3 = demon_parse
143
 
144
+ response1 = gpt_pipeline(strategy1)[0]['generated_text']
145
+ response2 = gpt_pipeline(strategy2)[0]['generated_text']
146
+ response3 = gpt_pipeline(strategy3)[0]['generated_text']
147
  return (response1, response2, response3)
148
 
149
  # Gradio interface