research14 committed on
Commit
8cbc513
·
1 Parent(s): cee52ff
Files changed (1) hide show
  1. app.py +5 -7
app.py CHANGED
@@ -11,7 +11,7 @@ model = AutoModelForCausalLM.from_pretrained(model_name)
11
 
12
  template_single = '''Please output any <{}> in the following sentence one per line without any additional text: "{}"'''
13
 
14
- api_key = ""
15
 
16
  def chat(system_prompt, user_prompt, model = 'gpt-3.5-turbo', temperature = 0, verbose = False):
17
  ''' Normal call of OpenAI API '''
@@ -40,11 +40,7 @@ def format_chat_prompt(message, chat_history, max_convo_length):
40
  prompt = f"{prompt}\nUser: {message}\nAssistant:"
41
  return prompt
42
 
43
- def respond_gpt(new_api_key, tab_name, message, chat_history, max_convo_length = 10):
44
- global api_key
45
- api_key = new_api_key
46
- os.environ['OPENAI_API_TOKEN'] = api_key
47
-
48
  formatted_prompt = format_chat_prompt(message, chat_history, max_convo_length)
49
  print('Prompt + Context:')
50
  print(formatted_prompt)
@@ -70,6 +66,8 @@ def interface():
70
  tab_name = gr.Dropdown(["Noun", "Determiner", "Noun phrase", "Verb phrase", "Dependent clause", "T-units"], label="Linguistic Entity")
71
  btn = gr.Button("Submit")
72
 
 
 
73
  # prompt = template_single.format(tab_name, textbox_prompt)
74
 
75
  gr.Markdown("Strategy 1 QA-Based Prompting")
@@ -95,7 +93,7 @@ def interface():
95
  textbox_prompt.submit(respond, inputs=[textbox_prompt, vicuna_S2_chatbot], outputs=[textbox_prompt, vicuna_S2_chatbot])
96
  textbox_prompt.submit(respond, inputs=[textbox_prompt, vicuna_S3_chatbot], outputs=[textbox_prompt, vicuna_S3_chatbot])
97
 
98
- btn.click(respond_gpt, inputs=[api_key_input, tab_name, textbox_prompt, gpt_S1_chatbot], outputs=[api_key_input, tab_name, textbox_prompt, gpt_S1_chatbot])
99
 
100
  with gr.Blocks() as demo:
101
  gr.Markdown("# LLM Evaluator With Linguistic Scrutiny")
 
11
 
12
  template_single = '''Please output any <{}> in the following sentence one per line without any additional text: "{}"'''
13
 
14
+ global api_key
15
 
16
  def chat(system_prompt, user_prompt, model = 'gpt-3.5-turbo', temperature = 0, verbose = False):
17
  ''' Normal call of OpenAI API '''
 
40
  prompt = f"{prompt}\nUser: {message}\nAssistant:"
41
  return prompt
42
 
43
+ def respond_gpt(tab_name, message, chat_history, max_convo_length = 10):
 
 
 
 
44
  formatted_prompt = format_chat_prompt(message, chat_history, max_convo_length)
45
  print('Prompt + Context:')
46
  print(formatted_prompt)
 
66
  tab_name = gr.Dropdown(["Noun", "Determiner", "Noun phrase", "Verb phrase", "Dependent clause", "T-units"], label="Linguistic Entity")
67
  btn = gr.Button("Submit")
68
 
69
+ os.environ['OPENAI_API_TOKEN'] = api_key_input
70
+
71
  # prompt = template_single.format(tab_name, textbox_prompt)
72
 
73
  gr.Markdown("Strategy 1 QA-Based Prompting")
 
93
  textbox_prompt.submit(respond, inputs=[textbox_prompt, vicuna_S2_chatbot], outputs=[textbox_prompt, vicuna_S2_chatbot])
94
  textbox_prompt.submit(respond, inputs=[textbox_prompt, vicuna_S3_chatbot], outputs=[textbox_prompt, vicuna_S3_chatbot])
95
 
96
+ btn.click(respond_gpt, inputs=[tab_name, textbox_prompt, gpt_S1_chatbot], outputs=[tab_name, textbox_prompt, gpt_S1_chatbot])
97
 
98
  with gr.Blocks() as demo:
99
  gr.Markdown("# LLM Evaluator With Linguistic Scrutiny")